diff --git a/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt b/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt new file mode 100644 index 00000000..e153391f --- /dev/null +++ b/vendor/riscv-isa-sim/.github/workflows/apt-packages.txt @@ -0,0 +1,2 @@ +build-essential +device-tree-compiler diff --git a/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml b/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml new file mode 100644 index 00000000..aeaf460e --- /dev/null +++ b/vendor/riscv-isa-sim/.github/workflows/continuous-integration.yml @@ -0,0 +1,28 @@ +# This file describes the GitHub Actions workflow for continuous integration of Spike. +# +# See +# https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions +# for API reference documentation on this file format. + +name: Continuous Integration + +on: + push: + branches: + - master + pull_request: + branches: + - master + + +jobs: + test: + name: Test Spike build + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Install Dependencies + run: sudo xargs apt-get install -y < .github/workflows/apt-packages.txt + + - run: ci-tests/test-spike diff --git a/vendor/riscv-isa-sim/.gitignore b/vendor/riscv-isa-sim/.gitignore new file mode 100644 index 00000000..14326e9c --- /dev/null +++ b/vendor/riscv-isa-sim/.gitignore @@ -0,0 +1,7 @@ +build/ +*.gch +autom4te.cache/ +.*.swp +*.o +*.d +.gdb_history diff --git a/vendor/riscv-isa-sim/ChangeLog.md b/vendor/riscv-isa-sim/ChangeLog.md new file mode 100644 index 00000000..144cb135 --- /dev/null +++ b/vendor/riscv-isa-sim/ChangeLog.md @@ -0,0 +1,36 @@ +Version 1.1.0 +------------- +- Zbkb, Zbkc, Zbkx, Zknd, Zkne, Zknh, Zksed, Zksh scalar cryptography extensions (Zk, Zkn, and Zks groups), v1.0 +- Zkr virtual entropy source emulation, v1.0 +- V extension, v1.0 +- P extension, v0.9.2 +- Zba extension, v1.0 +- Zbb extension, v1.0 +- Zbc extension, v1.0 +- Zbs extension, v1.0 +- Hypervisor extension, v1.0 +- Svnapot extension, v1.0 +- Svpbmt extension, v1.0 +- Svinval extension, v1.0 + +Version 1.0.1-dev +----------------- +- Preliminary support for a subset of the Vector Extension, v0.7.1. +- Support S-mode vectored interrupts (i.e. `stvec[0]` is now writable). +- Added support for dynamic linking of libraries containing MMIO devices. +- Added `--priv` flag to control which privilege modes are available. +- When the commit log is enabled at configure time (`--enable-commitlog`), + it must also be enabled at runtime with the `--log-commits` option. +- Several debug-related additions and changes: + - Added `hasel` debug feature. + - Added `--dm-no-abstract-csr` command-line option. + - Added `--dm-no-halt-groups` command line option. + - Renamed `--progsize` to `--dm-progsize`. + - Renamed `--debug-sba` to `--dm-sba`. + - Renamed `--debug-auth` to `--dm-auth`. + - Renamed `--abstract-rti` to `--dm-abstract-rti`. + - Renamed `--without-hasel` to `--dm-no-hasel`. + +Version 1.0.0 (2019-03-30) +-------------------------- +- First versioned release. diff --git a/vendor/riscv-isa-sim/LICENSE b/vendor/riscv-isa-sim/LICENSE new file mode 100644 index 00000000..34f576ba --- /dev/null +++ b/vendor/riscv-isa-sim/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2010-2017, The Regents of the University of California +(Regents). All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. 
Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. +3. Neither the name of the Regents nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING +OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS +BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED +HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE +MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. diff --git a/vendor/riscv-isa-sim/Makefile.in b/vendor/riscv-isa-sim/Makefile.in new file mode 100644 index 00000000..d4cb83e7 --- /dev/null +++ b/vendor/riscv-isa-sim/Makefile.in @@ -0,0 +1,524 @@ +#========================================================================= +# Toplevel Makefile for the Modular C++ Build System +#========================================================================= +# Please read the documenation in 'mcppbs-doc.txt' for more details on +# how the Modular C++ Build System works. For most projects, a developer +# will not need to make any changes to this makefile. The key targets +# are as follows: +# +# - default : build all libraries and programs +# - check : build and run all unit tests +# - install : install headers, project library, and some programs +# - clean : remove all generated content (except autoconf files) +# - dist : make a source tarball +# - distcheck : make a source tarball, untar it, check it, clean it +# - distclean : remove everything +# + +#------------------------------------------------------------------------- +# Basic setup +#------------------------------------------------------------------------- + +# Remove all default implicit rules since they can cause subtle bugs +# and they just make things run slower +.SUFFIXES: +% : %,v +% : RCS/%,v +% : RCS/% +% : s.% +% : SCCS/s.% + +# Default is to build the prereqs of the all target (defined at bottom) +default : all +.PHONY : default + +project_name := @PACKAGE_TARNAME@ +src_dir := @srcdir@ +scripts_dir := $(src_dir)/scripts + +HAVE_INT128 := @HAVE_INT128@ +HAVE_DLOPEN := @HAVE_DLOPEN@ +HAVE_CLANG_PCH := @HAVE_CLANG_PCH@ + +# If the version information is not in the configure script, then we +# assume that we are in a working directory. We use the vcs-version.sh +# script in the scripts directory to generate an appropriate version +# string. Currently the way things are setup we have to run this script +# everytime we run make so the script needs to be as fast as possible. + +ifeq (@PACKAGE_VERSION@,?) 
+ project_ver:=$(shell $(scripts_dir)/vcs-version.sh $(src_dir)) +else + project_ver:=@PACKAGE_VERSION@ +endif + +# Installation directories + +prefix ?= @prefix@ + +INSTALLDIR ?= $(DESTDIR)$(prefix) + +install_hdrs_dir := $(INSTALLDIR)/include +install_libs_dir := $(INSTALLDIR)/lib +install_exes_dir := $(INSTALLDIR)/bin + +#------------------------------------------------------------------------- +# List of subprojects +#------------------------------------------------------------------------- + +sprojs := @subprojects@ +sprojs_enabled := @subprojects_enabled@ + +sprojs_include := -I. -I$(src_dir) $(addprefix -I$(src_dir)/, $(sprojs_enabled)) +VPATH := $(addprefix $(src_dir)/, $(sprojs_enabled)) + +#------------------------------------------------------------------------- +# Programs and flags +#------------------------------------------------------------------------- + +# C++ compiler +# - CPPFLAGS : flags for the preprocessor (eg. -I,-D) +# - CXXFLAGS : flags for C++ compiler (eg. -Wall,-g,-O3) +# +# To allow a user to specify CFLAGS or similar as part of the Make +# command, we also have mcpps-CFLAGS etc. with stuff that shouldn't be +# lost in such a case. +# +# The order of precedence (highest to lowest) is then: +# +# - Specified as part of Make command line +# - Specified as part of running configure +# - Specified here (default-CFLAGS) +# +# These all appear on the command line, from lowest precedence to +# highest. + +default-CFLAGS := -DPREFIX=\"$(prefix)\" -Wall -Wno-unused -Wno-nonportable-include-path -g -O2 -fPIC +default-CXXFLAGS := $(default-CFLAGS) -std=c++17 + +mcppbs-CPPFLAGS := @CPPFLAGS@ +mcppbs-CFLAGS := $(default-CFLAGS) @CFLAGS@ +mcppbs-CXXFLAGS := $(default-CXXFLAGS) @CXXFLAGS@ + +CC := @CC@ +CXX := @CXX@ + +# These are the flags actually used for a C++ compile or a C compile. +# The language-specific flags come after the preprocessor flags, but +# user-supplied flags always take precedence. +all-cxx-flags := \ + $(mcppbs-CPPFLAGS) $(mcppbs-CXXFLAGS) $(CPPFLAGS) $(CXXFLAGS) +all-c-flags := \ + $(mcppbs-CPPFLAGS) $(mcppbs-CFLAGS) $(CPPFLAGS) $(CFLAGS) + +COMPILE := $(CXX) -MMD -MP $(all-cxx-flags) $(sprojs_include) @BOOST_CPPFLAGS@ +COMPILE_C := $(CC) -MMD -MP $(all-c-flags) $(sprojs_include) + +# Linker +# - LDFLAGS : Flags for the linker (eg. -L) +# - LIBS : Library flags (eg. -l) + +mcppbs-LDFLAGS := @LDFLAGS@ @BOOST_LDFLAGS@ +all-link-flags := $(mcppbs-LDFLAGS) $(LDFLAGS) + +comma := , +LD := $(CXX) +LIBS := @LIBS@ @BOOST_ASIO_LIB@ @BOOST_REGEX_LIB@ +LINK := $(LD) -L. 
$(all-link-flags) -Wl,-rpath,$(install_libs_dir) $(patsubst -L%,-Wl$(comma)-rpath$(comma)%,$(filter -L%,$(LDFLAGS))) + +# Library creation + +AR := @AR@ +RANLIB := @RANLIB@ + +# Host simulator + +RUN := @RUN@ +RUNFLAGS := @RUNFLAGS@ + +# Installation + +MKINSTALLDIRS := $(scripts_dir)/mk-install-dirs.sh +INSTALL := @INSTALL@ +INSTALL_HDR := $(INSTALL) -m 644 +INSTALL_LIB := $(INSTALL) -m 644 +INSTALL_EXE := $(INSTALL) -m 755 +STOW := @stow@ + +# Tests +bintests = $(src_dir)/tests/ebreak.py + +#------------------------------------------------------------------------- +# Include subproject makefile fragments +#------------------------------------------------------------------------- + +sprojs_mk = $(addsuffix .mk, $(sprojs_enabled)) + +-include $(sprojs_mk) + +dist_junk += $(sprojs_mk) + +#------------------------------------------------------------------------- +# Reverse list helper function +#------------------------------------------------------------------------- +# This function is used by the subproject template to reverse the list +# of dependencies. It uses recursion to perform the reversal. +# +# Arguments: +# $(1) : space separated input list +# retval : input list in reverse order +# + +reverse_list = $(call reverse_list_h,$(1),) +define reverse_list_h + $(if $(strip $(1)), \ + $(call reverse_list_h, \ + $(wordlist 2,$(words $(1)),$(1)), \ + $(firstword $(1)) $(2)), \ + $(2)) +endef + +#------------------------------------------------------------------------- +# Template for per subproject rules +#------------------------------------------------------------------------- +# The template is instantiated for each of the subprojects. It relies on +# subprojects defining a certain set of make variables which are all +# prefixed with the subproject name. Since subproject names can have +# dashes in them (and the make variables are assumed to only use +# underscores) the template takes two arguments - one with the regular +# subproject name and one with dashes replaced with underscores. +# +# Arguments: +# $(1) : real subproject name (ie with dashes) +# $(2) : normalized subproject name (ie dashes replaced with underscores) +# + +define subproject_template + +# In some (rare) cases, a subproject might not have any actual object +# files. It might only include header files or program sources. To keep +# things consistent we still want a library for this subproject, so in +# this spectial case we create a dummy source file and thus the build +# system will create a library for this subproject with just the +# corresponding dummy object file. 
+ +ifeq ($$(strip $$($(2)_srcs) $$($(2)_c_srcs)),) +$(2)_srcs += _$(1).cc +$(2)_junk += _$(1).cc +endif + +_$(1).cc : + echo "int _$(2)( int arg ) { return arg; }" > $$@ + +# Build the object files for this subproject + +$(2)_pch := $$(patsubst %.h, %.h.gch, $$($(2)_precompiled_hdrs)) +$(2)_objs := $$(patsubst %.cc, %.o, $$($(2)_srcs)) +$(2)_c_objs := $$(patsubst %.c, %.o, $$($(2)_c_srcs)) +$(2)_deps := $$(patsubst %.o, %.d, $$($(2)_objs)) +$(2)_deps += $$(patsubst %.o, %.d, $$($(2)_c_objs)) +$(2)_deps += $$(patsubst %.h, %.h.d, $$($(2)_precompiled_hdrs)) +$$($(2)_pch) : %.h.gch : %.h + $(COMPILE) -x c++-header $$< -o $$@ +$$($(2)_objs) : %.o : %.cc $$($(2)_gen_hdrs) $$($(2)_pch) + $(COMPILE) $(if $(HAVE_CLANG_PCH), $$(if $$($(2)_pch), -include-pch $$($(2)_pch))) $$($(2)_CFLAGS) -c $$< +$$($(2)_c_objs) : %.o : %.c $$($(2)_gen_hdrs) + $(COMPILE_C) $$($(2)_CFLAGS) -c $$< + +$(2)_junk += $$($(2)_pch) $$($(2)_objs) $$($(2)_c_objs) $$($(2)_deps) \ + $$($(2)_gen_hdrs) + +# Reverse the dependency list so that a given subproject only depends on +# subprojects listed to its right. This is the correct order for linking +# the list of subproject libraries. + +$(2)_reverse_deps := $$(call reverse_list,$$($(2)_subproject_deps)) + +# Build a library for this subproject + +$(2)_lib_libs := $$($(2)_reverse_deps) +$(2)_lib_libnames := $$(patsubst %, lib%.a, $$($(2)_lib_libs)) +$(2)_lib_libarg := $$(patsubst %, -l%, $$($(2)_lib_libs)) +$(2)_lib_libnames_shared := $$(if $$($(2)_install_shared_lib),lib$(1).so,) + +lib$(1).a : $$($(2)_objs) $$($(2)_c_objs) $$($(2)_lib_libnames) + $(AR) rcs $$@ $$^ +lib$(1).so : $$($(2)_objs) $$($(2)_c_objs) $$($(2)_lib_libnames_shared) $$($(2)_lib_libnames) + $(LINK) -shared -o $$@ $(if $(filter Darwin,$(shell uname -s)),-install_name $(install_libs_dir)/$$@) $$^ $$($(2)_lib_libnames) $(LIBS) + +$(2)_junk += lib$(1).a +$(2)_junk += $$(if $$($(2)_install_shared_lib),lib$(1).so,) + +# Build unit tests + +$(2)_test_objs := $$(patsubst %.cc, %.o, $$($(2)_test_srcs)) +$(2)_test_deps := $$(patsubst %.o, %.d, $$($(2)_test_objs)) +$(2)_test_exes := $$(patsubst %.t.cc, %-utst, $$($(2)_test_srcs)) +$(2)_test_outs := $$(patsubst %, %.out, $$($(2)_test_exes)) +$(2)_test_libs := $(1) $$($(2)_reverse_deps) utst +$(2)_test_libnames := $$(patsubst %, lib%.a, $$($(2)_test_libs)) +$(2)_test_libarg := $$(patsubst %, -l%, $$($(2)_test_libs)) + +$$($(2)_test_objs) : %.o : %.cc + $(COMPILE) -c $$< + +$$($(2)_test_exes) : %-utst : %.t.o $$($(2)_test_libnames) + $(LINK) -o $$@ $$< $$($(2)_test_libnames) $(LIBS) + +$(2)_deps += $$($(2)_test_deps) +$(2)_junk += \ + $$($(2)_test_objs) $$($(2)_test_deps) \ + $$($(2)_test_exes) *.junk-dat + +# Run unit tests + +$$($(2)_test_outs) : %.out : % + $(RUN) $(RUNFLAGS) ./$$< default | tee $$@ + +$(2)_junk += $$($(2)_test_outs) + +# Build programs + +$(2)_prog_objs := $$(patsubst %.cc, %.o, $$($(2)_prog_srcs)) +$(2)_prog_deps := $$(patsubst %.o, %.d, $$($(2)_prog_objs)) +$(2)_prog_exes := $$(patsubst %.cc, %, $$($(2)_prog_srcs)) +$(2)_prog_libs := $(1) $$($(2)_reverse_deps) +$(2)_prog_libnames := $$(patsubst %, lib%.a, $$($(2)_prog_libs)) +$(2)_prog_libarg := $$(patsubst %, -l%, $$($(2)_prog_libs)) + +$$($(2)_prog_objs) : %.o : %.cc + $(COMPILE) -c $$< + +$$($(2)_prog_exes) : % : %.o $$($(2)_prog_libnames) + $(LINK) -o $$@ $$< $$($(2)_prog_libnames) $(LIBS) + +$(2)_deps += $$($(2)_prog_deps) +$(2)_junk += $$($(2)_prog_objs) $$($(2)_prog_deps) $$($(2)_prog_exes) + +# Build programs which will be installed + +$(2)_install_prog_objs := $$(patsubst %.cc, %.o, 
$$($(2)_install_prog_srcs)) +$(2)_install_prog_deps := $$(patsubst %.o, %.d, $$($(2)_install_prog_objs)) +$(2)_install_prog_exes := $$(patsubst %.cc, %, $$($(2)_install_prog_srcs)) + +$$($(2)_install_prog_objs) : %.o : %.cc $$($(2)_gen_hdrs) + $(COMPILE) -c $$< + +$$($(2)_install_prog_exes) : % : %.o $$($(2)_prog_libnames) + $(LINK) -o $$@ $$< $$($(2)_prog_libnames) $(LIBS) + +$(2)_deps += $$($(2)_install_prog_deps) +$(2)_junk += \ + $$($(2)_install_prog_objs) $$($(2)_install_prog_deps) \ + $$($(2)_install_prog_exes) + +# Subproject specific targets + +all-$(1) : lib$(1).a $$($(2)_install_prog_exes) + +check-$(1) : $$($(2)_test_outs) + echo; grep -h -e'Unit Tests' -e'FAILED' -e'Segementation' $$^; echo + +clean-$(1) : + rm -rf $$($(2)_junk) + +.PHONY : all-$(1) check-$(1) clean-$(1) + +# Update running variables + +libs += lib$(1).a +objs += $$($(2)_objs) +srcs += $$(addprefix $(src_dir)/$(1)/, $$($(2)_srcs)) +hdrs += $$(addprefix $(src_dir)/$(1)/, $$($(2)_hdrs)) $$($(2)_gen_hdrs) +junk += $$($(2)_junk) +deps += $$($(2)_deps) + +test_outs += $$($(2)_test_outs) + +install_config_hdrs += $$(if $$($(2)_install_config_hdr),$(1),) +install_hdrs += $$(addprefix $(src_dir)/$(1)/, $$($(2)_install_hdrs)) +install_libs += $$(if $$($(2)_install_lib),lib$(1).a,) +install_libs += $$(if $$($(2)_install_shared_lib),lib$(1).so,) +install_exes += $$($(2)_install_prog_exes) +install_pcs += $$(if $$($(2)_install_lib),riscv-$(1).pc,) + +endef + +# Iterate over the subprojects and call the template for each one + +$(foreach sproj,$(sprojs_enabled), \ + $(eval $(call subproject_template,$(sproj),$(subst -,_,$(sproj))))) + +#------------------------------------------------------------------------- +# Autodependency files +#------------------------------------------------------------------------- + +-include $(deps) + +deps : $(deps) +.PHONY : deps + +#------------------------------------------------------------------------- +# Check +#------------------------------------------------------------------------- + +bintest_outs = $(bintests:=.out) +junk += $(bintest_outs) +%.out: % all + ./$* < /dev/null 2>&1 | tee $@ + +check-cpp : $(test_outs) + @echo + ! grep -h -e'Unit Tests' -e'FAILED' -e'Segmentation' $^ < /dev/null + @echo + +check-bin : $(bintest_outs) + ! 
tail -n 1 $^ < /dev/null 2>&1 | grep FAILED + +check : check-cpp check-bin + +.PHONY : check + +#------------------------------------------------------------------------- +# Installation +#------------------------------------------------------------------------- + +install-config-hdrs : config.h + $(MKINSTALLDIRS) $(install_hdrs_dir) + for dir in $(install_config_hdrs); \ + do \ + $(MKINSTALLDIRS) $(install_hdrs_dir)/$$dir; \ + $(INSTALL_HDR) $< $(install_hdrs_dir)/$$dir; \ + done + +install-hdrs : $(install_hdrs) + $(MKINSTALLDIRS) $(install_hdrs_dir) + for file in $(subst $(src_dir)/,,$^); \ + do \ + $(MKINSTALLDIRS) $(install_hdrs_dir)/`dirname $$file`; \ + $(INSTALL_HDR) $(src_dir)/$$file $(install_hdrs_dir)/`dirname $$file`; \ + done + +install-libs : $(install_libs) + $(MKINSTALLDIRS) $(install_libs_dir) + for file in $^; \ + do \ + $(INSTALL_LIB) $$file $(install_libs_dir); \ + done + +install-exes : $(install_exes) + $(MKINSTALLDIRS) $(install_exes_dir) + for file in $^; \ + do \ + $(INSTALL_EXE) $$file $(install_exes_dir); \ + done + +install-pc : $(install_pcs) + $(MKINSTALLDIRS) $(install_libs_dir)/pkgconfig/ + for file in $^; \ + do \ + $(INSTALL_HDR) $$file $(install_libs_dir)/pkgconfig/; \ + done + +install : install-hdrs install-config-hdrs install-libs install-exes install-pc + +.PHONY : install install-hdrs install-config-hdrs install-libs install-exes + +#------------------------------------------------------------------------- +# Regenerate configure information +#------------------------------------------------------------------------- + +config.status : $(src_dir)/configure + ./config.status --recheck + +sprojs_mk_in = \ + $(join $(addprefix $(src_dir)/, $(sprojs_enabled)), \ + $(patsubst %, /%.mk.in, $(sprojs_enabled))) + +Makefile : $(src_dir)/Makefile.in $(sprojs_mk_in) config.status + ./config.status + +dist_junk += config.status config.h Makefile config.log + +#------------------------------------------------------------------------- +# Distribution +#------------------------------------------------------------------------- +# The distribution tarball is named project-ver.tar.gz and it includes +# both enabled and disabled subprojects. + +dist_files = \ + $(sprojs) \ + README \ + style-guide.txt \ + mcppbs-uguide.txt \ + scripts \ + configure.ac \ + aclocal.m4 \ + configure \ + config.h.in \ + Makefile.in \ + +dist_dir := $(project_name)-$(project_ver) +dist_tgz := $(project_name)-$(project_ver).tar.gz + +# Notice that when we make the distribution we rewrite the configure.ac +# script with the current version and we rerun autoconf in the new +# source directory so that the distribution will have the proper version +# information. We also rewrite the "Version : " line in the README. + +dist : + rm -rf $(dist_dir) + mkdir $(dist_dir) + tar -C $(src_dir) -cf - $(dist_files) | tar -C $(dist_dir) -xpf - + sed -i.bak 's/^\(# Version :\).*/\1 $(project_ver)/' $(dist_dir)/README + sed -i.bak 's/\( proj_version,\).*/\1 [$(project_ver)])/' $(dist_dir)/configure.ac + cd $(dist_dir) && \ + autoconf && autoheader && \ + rm -rf autom4te.cache configure.ac.bak README.bak + tar -czvf $(dist_tgz) $(dist_dir) + rm -rf $(dist_dir) + +# You can use the distcheck target to try untarring the distribution and +# then running configure, make, make check, and make distclean. A +# "directory is not empty" error means distclean is not removing +# everything. 
+ +distcheck : dist + rm -rf $(dist_dir) + tar -xzvf $(dist_tgz) + mkdir -p $(dist_dir)/build + cd $(dist_dir)/build; ../configure; make; make check; make distclean + rm -rf $(dist_dir) + +junk += $(project_name)-*.tar.gz + +.PHONY : dist distcheck + +#------------------------------------------------------------------------- +# Default +#------------------------------------------------------------------------- + +all : $(install_hdrs) $(install_libs) $(install_exes) +.PHONY : all + +#------------------------------------------------------------------------- +# Makefile debugging +#------------------------------------------------------------------------- +# This handy rule will display the contents of any make variable by +# using the target debug-. So for example, make debug-junk will +# display the contents of the junk variable. + +debug-% : + @echo $* = $($*) + +#------------------------------------------------------------------------- +# Clean up junk +#------------------------------------------------------------------------- + +clean : + rm -rf *~ \#* $(junk) + +distclean : + rm -rf *~ \#* $(junk) $(dist_junk) + +.PHONY : clean distclean diff --git a/vendor/riscv-isa-sim/README.md b/vendor/riscv-isa-sim/README.md new file mode 100644 index 00000000..6da9fab9 --- /dev/null +++ b/vendor/riscv-isa-sim/README.md @@ -0,0 +1,300 @@ +Spike RISC-V ISA Simulator +============================ + +About +------------- + +Spike, the RISC-V ISA Simulator, implements a functional model of one or more +RISC-V harts. It is named after the golden spike used to celebrate the +completion of the US transcontinental railway. + +Spike supports the following RISC-V ISA features: + - RV32I and RV64I base ISAs, v2.1 + - RV32E and RV64E base ISAs, v1.9 + - Zifencei extension, v2.0 + - Zicsr extension, v2.0 + - M extension, v2.0 + - A extension, v2.1 + - F extension, v2.2 + - D extension, v2.2 + - Q extension, v2.2 + - C extension, v2.0 + - Zbkb, Zbkc, Zbkx, Zknd, Zkne, Zknh, Zksed, Zksh scalar cryptography extensions (Zk, Zkn, and Zks groups), v1.0 + - Zkr virtual entropy source emulation, v1.0 + - V extension, v1.0 (_requires a 64-bit host_) + - P extension, v0.9.2 + - Zba extension, v1.0 + - Zbb extension, v1.0 + - Zbc extension, v1.0 + - Zbs extension, v1.0 + - Conformance to both RVWMO and RVTSO (Spike is sequentially consistent) + - Machine, Supervisor, and User modes, v1.11 + - Hypervisor extension, v1.0 + - Svnapot extension, v1.0 + - Svpbmt extension, v1.0 + - Svinval extension, v1.0 + - CMO extension, v1.0 + - Debug v0.14 + +As a Spike extension, the remainder of the proposed +[Bit-Manipulation Extensions](https://github.com/riscv/riscv-bitmanip) +is provided under the Spike-custom extension name _Xbitmanip_. +These instructions (and, of course, the extension name) are not RISC-V +standards. + +These proposed bit-manipulation extensions can be split into further +groups: Zbp, Zbs, Zbe, Zbf, Zbc, Zbm, Zbr, Zbt. Note that Zbc is +ratified, but the original proposal contained some extra instructions +(64-bit carryless multiplies) which are captured here. + +To enable these extensions individually, use the Spike-custom +extension names _XZbp_, _XZbs_, _XZbc_, and so on. + +Versioning and APIs +------------------- + +Projects are versioned primarily to indicate when the API has been extended or +rendered incompatible. 
In that spirit, Spike aims to follow the +[SemVer](https://semver.org/spec/v2.0.0.html) versioning scheme, in which +major version numbers are incremented when backwards-incompatible API changes +are made; minor version numbers are incremented when new APIs are added; and +patch version numbers are incremented when bugs are fixed in +a backwards-compatible manner. + +Spike's principal public API is the RISC-V ISA. _The C++ interface to Spike's +internals is **not** considered a public API at this time_, and +backwards-incompatible changes to this interface _will_ be made without +incrementing the major version number. + +Build Steps +--------------- + +We assume that the RISCV environment variable is set to the RISC-V tools +install path. + + $ apt-get install device-tree-compiler + $ mkdir build + $ cd build + $ ../configure --prefix=$RISCV + $ make + $ [sudo] make install + +If your system uses the `yum` package manager, you can substitute +`yum install dtc` for the first step. + +Build Steps on OpenBSD +---------------------- + +Install bash, gmake, dtc, and use clang. + + $ pkg_add bash gmake dtc + $ exec bash + $ export CC=cc; export CXX=c++ + $ mkdir build + $ cd build + $ ../configure --prefix=$RISCV + $ gmake + $ [doas] make install + +Compiling and Running a Simple C Program +------------------------------------------- + +Install spike (see Build Steps), riscv-gnu-toolchain, and riscv-pk. + +Write a short C program and name it hello.c. Then, compile it into a RISC-V +ELF binary named hello: + + $ riscv64-unknown-elf-gcc -o hello hello.c + +Now you can simulate the program atop the proxy kernel: + + $ spike pk hello + +Simulating a New Instruction +------------------------------------ + +Adding an instruction to the simulator requires two steps: + + 1. Describe the instruction's functional behavior in the file + riscv/insns/.h. Examine other instructions + in that directory as a starting point. + + 2. Add the opcode and opcode mask to riscv/opcodes.h. Alternatively, + add it to the riscv-opcodes package, and it will do so for you: + ``` + $ cd ../riscv-opcodes + $ vi opcodes // add a line for the new instruction + $ make install + ``` + + 3. Rebuild the simulator. + +Interactive Debug Mode +--------------------------- + +To invoke interactive debug mode, launch spike with -d: + + $ spike -d pk hello + +To see the contents of an integer register (0 is for core 0): + + : reg 0 a0 + +To see the contents of a floating point register: + + : fregs 0 ft0 + +or: + + : fregd 0 ft0 + +depending upon whether you wish to print the register as single- or double-precision. + +To see the contents of a memory location (physical address in hex): + + : mem 2020 + +To see the contents of memory with a virtual address (0 for core 0): + + : mem 0 2020 + +You can advance by one instruction by pressing the enter key. You can also +execute until a desired equality is reached: + + : until pc 0 2020 (stop when pc=2020) + : until reg 0 mie a (stop when register mie=0xa) + : until mem 2020 50a9907311096993 (stop when mem[2020]=50a9907311096993) + +Alternatively, you can execute as long as an equality is true: + + : while mem 2020 50a9907311096993 + +You can continue execution indefinitely by: + + : r + +At any point during execution (even without -d), you can enter the +interactive debug mode with `-`. + +To end the simulation from the debug prompt, press `-` or: + + : q + +Debugging With Gdb +------------------ + +An alternative to interactive debug mode is to attach using gdb. 
Because spike +tries to be like real hardware, you also need OpenOCD to do that. OpenOCD +doesn't currently know about address translation, so it's not possible to +easily debug programs that are run under `pk`. We'll use the following test +program: +``` +$ cat rot13.c +char text[] = "Vafgehpgvba frgf jnag gb or serr!"; + +// Don't use the stack, because sp isn't set up. +volatile int wait = 1; + +int main() +{ + while (wait) + ; + + // Doesn't actually go on the stack, because there are lots of GPRs. + int i = 0; + while (text[i]) { + char lower = text[i] | 32; + if (lower >= 'a' && lower <= 'm') + text[i] += 13; + else if (lower > 'm' && lower <= 'z') + text[i] -= 13; + i++; + } + +done: + while (!wait) + ; +} +$ cat spike.lds +OUTPUT_ARCH( "riscv" ) + +SECTIONS +{ + . = 0x10010000; + .text : { *(.text) } + .data : { *(.data) } +} +$ riscv64-unknown-elf-gcc -g -Og -o rot13-64.o -c rot13.c +$ riscv64-unknown-elf-gcc -g -Og -T spike.lds -nostartfiles -o rot13-64 rot13-64.o +``` + +To debug this program, first run spike telling it to listen for OpenOCD: +``` +$ spike --rbb-port=9824 -m0x10000000:0x20000 rot13-64 +Listening for remote bitbang connection on port 9824. +``` + +In a separate shell run OpenOCD with the appropriate configuration file: +``` +$ cat spike.cfg +interface remote_bitbang +remote_bitbang_host localhost +remote_bitbang_port 9824 + +set _CHIPNAME riscv +jtag newtap $_CHIPNAME cpu -irlen 5 -expected-id 0x10e31913 + +set _TARGETNAME $_CHIPNAME.cpu +target create $_TARGETNAME riscv -chain-position $_TARGETNAME + +gdb_report_data_abort enable + +init +halt +$ openocd -f spike.cfg +Open On-Chip Debugger 0.10.0-dev-00002-gc3b344d (2017-06-08-12:14) +... +riscv.cpu: target state: halted +``` + +In yet another shell, start your gdb debug session: +``` +tnewsome@compy-vm:~/SiFive/spike-test$ riscv64-unknown-elf-gdb rot13-64 +GNU gdb (GDB) 8.0.50.20170724-git +Copyright (C) 2017 Free Software Foundation, Inc. +License GPLv3+: GNU GPL version 3 or later +This is free software: you are free to change and redistribute it. +There is NO WARRANTY, to the extent permitted by law. Type "show copying" +and "show warranty" for details. +This GDB was configured as "--host=x86_64-pc-linux-gnu --target=riscv64-unknown-elf". +Type "show configuration" for configuration details. +For bug reporting instructions, please see: +. +Find the GDB manual and other documentation resources online at: +. +For help, type "help". +Type "apropos word" to search for commands related to "word"... +Reading symbols from rot13-64...done. +(gdb) target remote localhost:3333 +Remote debugging using localhost:3333 +0x0000000010010004 in main () at rot13.c:8 +8 while (wait) +(gdb) print wait +$1 = 1 +(gdb) print wait=0 +$2 = 0 +(gdb) print text +$3 = "Vafgehpgvba frgf jnag gb or serr!" +(gdb) b done +Breakpoint 1 at 0x10010064: file rot13.c, line 22. +(gdb) c +Continuing. +Disabling abstract command writes to CSRs. + +Breakpoint 1, main () at rot13.c:23 +23 while (!wait) +(gdb) print wait +$4 = 0 +(gdb) print text +... 
+``` diff --git a/vendor/riscv-isa-sim/VERSION b/vendor/riscv-isa-sim/VERSION new file mode 100644 index 00000000..6ce2a755 --- /dev/null +++ b/vendor/riscv-isa-sim/VERSION @@ -0,0 +1 @@ +#define SPIKE_VERSION "1.1.1-dev" diff --git a/vendor/riscv-isa-sim/aclocal.m4 b/vendor/riscv-isa-sim/aclocal.m4 new file mode 100644 index 00000000..def74dba --- /dev/null +++ b/vendor/riscv-isa-sim/aclocal.m4 @@ -0,0 +1,302 @@ +#========================================================================= +# Local Autoconf Macros +#========================================================================= +# This file contains the macros for the Modular C++ Build System and +# additional autoconf macros which developers can use in their +# configure.ac scripts. Please read the documentation in +# 'mcppbs-doc.txt' for more details on how the Modular C++ Build System +# works. The documenation for each macro should include information +# about the author, date, and copyright. + +#------------------------------------------------------------------------- +# MCPPBS_PROG_INSTALL +#------------------------------------------------------------------------- +# This macro will add an --enable-stow command line option to the +# configure script. When enabled, this macro will first check to see if +# the stow program is available and if so it will set the $stow shell +# variable to the binary name and the $enable_stow shell variable to +# "yes". These variables can be used in a makefile to conditionally use +# stow for installation. +# +# This macro uses two environment variables to help setup default stow +# locations. The $STOW_PREFIX is used for stowing native built packages. +# The packages are staged in $STOW_PREFIX/pkgs and then symlinks are +# created from within $STOW_PREFIX into the pkgs subdirectory. If you +# only do native builds then this is all you need to set. If you don't +# set $STOW_PREFIX then the default is just the normal default prefix +# which is almost always /usr/local. +# +# For non-native builds we probably want to install the packages in a +# different location which includes the host architecture name as part +# of the prefix. For these kind of builds, we can specify the $STOW_ROOT +# environment variable and the effective prefix will be +# $STOW_ROOT/${host_alias} where ${host_alias} is specified on the +# configure command line with "--host". +# +# Here is an example setup: +# +# STOW_ROOT="$HOME/install" +# STOW_ARCH="i386-macosx10.4" +# STOW_PREFIX="${STOW_ROOT}/${STOW_ARCH}" +# + +AC_DEFUN([MCPPBS_PROG_INSTALL], +[ + + # Configure command line option + + AC_ARG_ENABLE(stow, + AS_HELP_STRING(--enable-stow,[Enable stow-based install]), + [enable_stow="yes"],[enable_stow="no"]) + + AC_SUBST([enable_stow]) + + # Environment variables + + AC_ARG_VAR([STOW_ROOT], [Root for non-native stow-based installs]) + AC_ARG_VAR([STOW_PREFIX], [Prefix for stow-based installs]) + + # Check for install script + + AC_PROG_INSTALL +]) + +#------------------------------------------------------------------------- +# MCPPBS_PROG_RUN +# ------------------------------------------------------------------------- +# If we are doing a non-native build then we look for an isa simulator +# to use for running tests. We set the RUN substitution variable to be +# empty for native builds or to the name of the isa simulator for +# non-native builds. 
Thus a makefile can run compiled programs +# regardless if we are doing a native or non-native build like this: +# +# $(RUN) $(RUNFLAGS) ./test-program +# + +AC_DEFUN([MCPPBS_PROG_RUN], +[ + AS_IF([ test "${build}" != "${host}" ], + [ + AC_CHECK_TOOLS([RUN],[isa-run run],[no]) + AS_IF([ test ${RUN} = "no" ], + [ + AC_MSG_ERROR([Cannot find simulator for target ${target_alias}]) + ]) + ],[ + RUN="" + ]) + AC_SUBST([RUN]) + AC_SUBST([RUNFLAGS]) +]) + +#------------------------------------------------------------------------- +# MCPPBS_SUBPROJECTS([ sproj1, sproj2, ... ]) +#------------------------------------------------------------------------- +# The developer should call this macro with a list of the subprojects +# which make up this project. One should order the list such that any +# given subproject only depends on subprojects listed before it. The +# subproject names can also include an * suffix which indicates that +# this is an optional subproject. Optional subprojects are only included +# as part of the project build if enabled on the configure command line +# with a --enable- flag. The user can also specify that all +# optional subprojects should be included in the build with the +# --enable-optional-subprojects flag. +# +# Subproject names can also include a ** suffix which indicates that it +# is an optional subproject, but there is a group with the same name. +# Thus the --enable- command line option will enable not just the +# subproject sproj but all of the subprojects which are in the group. +# There is no error checking to make sure that if you use the ** suffix +# you actually define a group so be careful. +# +# Both required and optional subprojects should have a 'subproject.ac' +# file. The script's filename should be the abbreivated subproject name +# (assuming the subproject name is sproj then we would use 'sproj.ac') +# The MCPPBS_SUBPROJECTS macro includes the 'subproject.ac' files for +# enabled subprojects. Whitespace and newlines are allowed within the +# list. +# +# Author : Christopher Batten +# Date : September 10, 2008 + +AC_DEFUN([MCPPBS_SUBPROJECTS], +[ + + # Add command line argument to enable all optional subprojects + + AC_ARG_ENABLE(optional-subprojects, + AS_HELP_STRING([--enable-optional-subprojects], + [Enable all optional subprojects])) + + # Loop through the subprojects given in the macro argument + + m4_foreach([MCPPBS_SPROJ],[$1], + [ + + # Determine if this is a required or an optional subproject + + m4_define([MCPPBS_IS_REQ], + m4_bmatch(MCPPBS_SPROJ,[\*+],[false],[true])) + + # Determine if there is a group with the same name + + m4_define([MCPPBS_IS_GROUP], + m4_bmatch(MCPPBS_SPROJ,[\*\*],[true],[false])) + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + m4_define([MCPPBS_SPROJ_NORM], + m4_normalize(m4_bpatsubsts(MCPPBS_SPROJ,[*],[]))) + + m4_define([MCPPBS_SPROJ_DEFINE], + m4_toupper(m4_bpatsubst(MCPPBS_SPROJ_NORM[]_ENABLED,[-],[_]))) + + m4_define([MCPPBS_SPROJ_FUNC], + m4_bpatsubst(_mpbp_[]MCPPBS_SPROJ_NORM[]_configure,[-],[_])) + + m4_define([MCPPBS_SPROJ_UNDERSCORES], + m4_bpatsubsts(MCPPBS_SPROJ,[-],[_])) + + m4_define([MCPPBS_SPROJ_SHVAR], + m4_bpatsubst(enable_[]MCPPBS_SPROJ_NORM[]_sproj,[-],[_])) + + # Add subproject to our running list + + subprojects="$subprojects MCPPBS_SPROJ_NORM" + + # Process the subproject appropriately. 
If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + m4_if(MCPPBS_IS_REQ,[true], + [ + AC_MSG_NOTICE([configuring default subproject : MCPPBS_SPROJ_NORM]) + AC_CONFIG_FILES(MCPPBS_SPROJ_NORM[].mk:MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].mk.in) + MCPPBS_SPROJ_SHVAR="yes" + subprojects_enabled="$subprojects_enabled MCPPBS_SPROJ_NORM" + AC_DEFINE(MCPPBS_SPROJ_DEFINE,, + [Define if subproject MCPPBS_SPROJ_NORM is enabled]) + m4_include(MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].ac) + ],[ + + # For optional subprojects we capture the 'subproject.ac' as a + # shell function so that in the MCPPBS_GROUP macro we can just + # call this shell function instead of reading in 'subproject.ac' + # again. + + MCPPBS_SPROJ_FUNC () + { + AC_MSG_NOTICE([configuring optional subproject : MCPPBS_SPROJ_NORM]) + AC_CONFIG_FILES(MCPPBS_SPROJ_NORM[].mk:MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].mk.in) + MCPPBS_SPROJ_SHVAR="yes" + subprojects_enabled="$subprojects_enabled MCPPBS_SPROJ_NORM" + AC_DEFINE(MCPPBS_SPROJ_DEFINE,, + [Define if subproject MCPPBS_SPROJ_NORM is enabled]) + m4_include(MCPPBS_SPROJ_NORM[]/MCPPBS_SPROJ_NORM[].ac) + }; + + # Optional subprojects add --enable-subproject command line + # options, _if_ the subproject name is not also a group name. + + m4_if(MCPPBS_IS_GROUP,[false], + [ + AC_ARG_ENABLE(MCPPBS_SPROJ_NORM, + AS_HELP_STRING(--enable-MCPPBS_SPROJ_NORM, + [Subproject MCPPBS_SPROJ_NORM]), + [MCPPBS_SPROJ_SHVAR="yes"],[MCPPBS_SPROJ_SHVAR="no"]) + + AS_IF([test "$MCPPBS_SPROJ_SHVAR" = "yes"], + [ + eval "MCPPBS_SPROJ_FUNC" + ],[ + AC_MSG_NOTICE([processing optional subproject : MCPPBS_SPROJ_NORM]) + ]) + + ],[ + + # If the subproject name is also a group name then we need to + # make sure that we set the shell variable for that subproject to + # no so that the group code knows we haven't run it yet. + + AC_MSG_NOTICE([processing optional subproject : MCPPBS_SPROJ_NORM]) + MCPPBS_SPROJ_SHVAR="no" + + ]) + + # Always execute the subproject configure code if we are enabling + # all subprojects. + + AS_IF([ test "$enable_optional_subprojects" = "yes" \ + && test "$MCPPBS_SPROJ_SHVAR" = "no" ], + [ + eval "MCPPBS_SPROJ_FUNC" + ]) + + ]) + + ]) + + # Output make variables + + AC_SUBST([subprojects]) + AC_SUBST([subprojects_enabled]) + +]) + +#------------------------------------------------------------------------- +# MCPPBS_GROUP( [group-name], [ sproj1, sproj2, ... ] ) +#------------------------------------------------------------------------- +# This macro creates a subproject group with the given group-name. When +# a user specifies --enable- the listed subprojects will be +# enabled. Groups can have the same name as a subproject and in that +# case whenever a user specifies --enable- the subprojects +# listed in the corresponding group will also be enabled. Groups are +# useful for specifying related subprojects which are usually enabled +# together, as well as for specifying that a specific optional +# subproject has dependencies on other optional subprojects. 
+# +# Author : Christopher Batten +# Date : September 10, 2008 + +AC_DEFUN([MCPPBS_GROUP], +[ + + m4_define([MCPPBS_GROUP_NORM], + m4_normalize([$1])) + + m4_define([MCPPBS_GROUP_SHVAR], + m4_bpatsubst(enable_[]MCPPBS_GROUP_NORM[]_group,[-],[_])) + + AC_ARG_ENABLE(MCPPBS_GROUP_NORM, + AS_HELP_STRING(--enable-MCPPBS_GROUP_NORM, + [Group MCPPBS_GROUP_NORM: $2]), + [MCPPBS_GROUP_SHVAR="yes"],[MCPPBS_GROUP_SHVAR="no"]) + + AS_IF([test "$MCPPBS_GROUP_SHVAR" = "yes" ], + [ + AC_MSG_NOTICE([configuring optional group : MCPPBS_GROUP_NORM]) + ]) + + m4_foreach([MCPPBS_SPROJ],[$2], + [ + + m4_define([MCPPBS_SPROJ_NORM], + m4_normalize(MCPPBS_SPROJ)) + + m4_define([MCPPBS_SPROJ_SHVAR], + m4_bpatsubst(enable_[]MCPPBS_SPROJ_NORM[]_sproj,[-],[_])) + + m4_define([MCPPBS_SPROJ_FUNC], + m4_bpatsubst(_mpbp_[]MCPPBS_SPROJ_NORM[]_configure,[-],[_])) + + AS_IF([ test "$MCPPBS_GROUP_SHVAR" = "yes" \ + && test "$MCPPBS_SPROJ_SHVAR" = "no" ], + [ + eval "MCPPBS_SPROJ_FUNC" + ]) + + ]) + +]) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include new file mode 100644 index 00000000..13eacdc1 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/Makefile.include @@ -0,0 +1,25 @@ +# set TARGETDIR to point to the directory which contains a sub-folder in the same name as the target +export TARGETDIR ?= /scratch/git-repo/github/neel/riscv-isa-sim/arch_test_target + +# set XLEN to max supported XLEN. Allowed values are 32 and 64 +export XLEN ?= 64 + +# name of the target. Note a folder of the same name must exist in the TARGETDIR directory +export RISCV_TARGET ?= spike + +# set the RISCV_DEVICE environment to a single extension you want to compile, simulate and/or verify. +# Leave this blank if you want to iterate through all the supported extensions available in the target +export RISCV_DEVICE ?= + +# set this to a string which needs to be passed to your target Makefile.include files +export RISCV_TARGET_FLAGS ?= + +# set this if you want to enable assertions on the test-suites. Currently no tests support +# assertions. +export RISCV_ASSERT ?= 0 + +# set the number of parallel jobs (along with any other arguments) you would like to execute. Note that the target needs to ensure +# that no common files across jobs are created/overwritten leading to unknown behavior +JOBS= -j1 + + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/README.md b/vendor/riscv-isa-sim/arch_test_target/spike/README.md new file mode 100644 index 00000000..56af2492 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/README.md @@ -0,0 +1,58 @@ +# Using the Spike Simulator as an Architectural test model + +This is a reference for running Spike as a target for the RISC-V Architectural Test framework. + +## Getting Spike + +The Spike repository should be cloned from [here](https://github.com/riscv/riscv-isa-sim/), preferably at the same directory level as the riscv-arch-test repository. + +## Building Spike + +The [README.md](../README.md) at the top level of the riscv-isa-sim directory gives details on building an executable spike model. + +## Adding Spike as a target to the Architectural Test framework + +Also at the top level is an ``arch_test_target directory``. This directory contains all the collaterals +required to add Spike as a target to the architectural test framework. 
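As a minimal, purely illustrative sketch of what this amounts to (the variable names come from the ``Makefile.include`` shown above; the values and the exact command-line overrides are assumptions, not part of the upstream flow), the setup boils down to pointing the framework at this directory and running the standard targets, with the relevant variables described in the rest of this section:

```
# Hypothetical invocation: run only the RV32 M-extension tests against Spike.
# XLEN and RISCV_DEVICE are the Makefile.include variables described below.
make compile simulate verify XLEN=32 RISCV_DEVICE=M
```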
+ +The file ``arch_test_target/spike/Makefile.include`` contains various parameters which can be set by +the user to modify the instance of spike on which the tests need to be run. +The user can modify the ``XLEN`` variable based on whether 32-bit or 64-bit tests need to be run. +If one would like to run tests of a single extension then set the `RISCV_DEVICE` to that extension +name (eg. M, C, Zifencei, etc). Leaving the ``RISCV_DEVICE`` empty would indicate running all tests +for all extensions available in the ``device/rv{XLEN}i_m`` directory No other variables should be modified. + +Now clone the architectural test framework repo and copy the updated Makefile.include to it: + +``` + $ git clone https://github.com/riscv/riscv-arch-test.git + $ cd riscv-arch-test + $ cp /riscv-isa-sim/arch_test_target/spike/Makefile.include . +``` + +The user will have to modify the ``TARGETDIR`` variable in ``riscv-arch-test/Makefile.include`` to point to the +absolute location of the ``riscv-isa-sim/arch_test_target`` directory. + +You can execute the tests from the root directory of the riscv-arch-test repo: + +``` +make compile simulate verify +``` + +## Updating the target for new tests + +As tests for new extensions are added to the architectural test repo, the spike target (i.e. +arch_test_target directory) will also need to be updated accordingly. Please refer to the [Porting a new target](https://github.com/riscv/riscv-arch-test/blob/master/doc/README.adoc#5-porting-a-new-target) +section for more details on what those changes/updates should be. + + + + + + + + + + + + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc b/vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc new file mode 100644 index 00000000..c43222de --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/Makefile_common.inc @@ -0,0 +1,34 @@ +TARGET_SIM ?= spike +TARGET_FLAGS ?= $(RISCV_TARGET_FLAGS) +ifeq ($(shell command -v $(TARGET_SIM) 2> /dev/null),) + $(error Target simulator executable '$(TARGET_SIM)` not found) +endif + +RISCV_PREFIX ?= riscv$(XLEN)-unknown-elf- +RISCV_GCC ?= $(RISCV_PREFIX)gcc +RISCV_OBJDUMP ?= $(RISCV_PREFIX)objdump +RISCV_GCC_OPTS ?= -g -static -mcmodel=medany -fvisibility=hidden -nostdlib -nostartfiles $(RVTEST_DEFINES) + +COMPILE_CMD = $$(RISCV_GCC) $(1) $$(RISCV_GCC_OPTS) \ + -I$(ROOTDIR)/riscv-test-suite/env/ \ + -I$(TARGETDIR)/$(RISCV_TARGET)/ \ + -T$(TARGETDIR)/$(RISCV_TARGET)/link.ld \ + $$(<) -o $$@ +OBJ_CMD = $$(RISCV_OBJDUMP) $$@ -D > $$@.objdump; \ + $$(RISCV_OBJDUMP) $$@ --source > $$@.debug + + +COMPILE_TARGET=\ + $(COMPILE_CMD); \ + if [ $$$$? -ne 0 ] ; \ + then \ + echo "\e[31m$$(RISCV_GCC) failed for target $$(@) \e[39m" ; \ + exit 1 ; \ + fi ; \ + $(OBJ_CMD); \ + if [ $$$$? 
-ne 0 ] ; \ + then \ + echo "\e[31m $$(RISCV_OBJDUMP) failed for target $$(@) \e[39m" ; \ + exit 1 ; \ + fi ; + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include new file mode 100644 index 00000000..daf0f434 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/C/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32ec \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include new file mode 100644 index 00000000..548b17d7 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/E/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32e \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include new file mode 100644 index 00000000..749c7fc2 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32e_unratified/M/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32em \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include new file mode 100644 index 00000000..346feaae --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/C/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include new file mode 100644 index 00000000..4fb87c62 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/F/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32if \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include new file mode 100644 index 00000000..740755c0 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/I/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include 
b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include new file mode 100644 index 00000000..5d8de47c --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/M/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32im \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include new file mode 100644 index 00000000..740755c0 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/Zifencei/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include new file mode 100644 index 00000000..8275495d --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv32i_m/privilege/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv32ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include new file mode 100644 index 00000000..e6ca9fb3 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/C/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include new file mode 100644 index 00000000..26113946 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/D/Makefile.include @@ -0,0 +1,8 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64ifd \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include new file mode 100644 index 00000000..2c763bf5 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/I/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include new file mode 100644 index 00000000..8ce555c6 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/M/Makefile.include @@ -0,0 +1,8 @@ +include 
$(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64im \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include new file mode 100644 index 00000000..2c763bf5 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/Zifencei/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64i \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include new file mode 100644 index 00000000..5ef2802f --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/device/rv64i_m/privilege/Makefile.include @@ -0,0 +1,7 @@ +include $(TARGETDIR)/spike/device/Makefile_common.inc +RUN_CMD = $(TARGET_SIM) $(TARGET_FLAGS) --isa=rv64ic \ + +signature=$(*).signature.output +signature-granularity=4\ + $< + +RUN_TARGET=\ + $(RUN_CMD) diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/link.ld b/vendor/riscv-isa-sim/arch_test_target/spike/link.ld new file mode 100644 index 00000000..8ad95e04 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/link.ld @@ -0,0 +1,18 @@ +OUTPUT_ARCH( "riscv" ) +ENTRY(rvtest_entry_point) + +SECTIONS +{ + . = 0x80000000; + .text.init : { *(.text.init) } + . = ALIGN(0x1000); + .tohost : { *(.tohost) } + . = ALIGN(0x1000); + .text : { *(.text) } + . 
= ALIGN(0x1000); + .data : { *(.data) } + .data.string : { *(.data.string)} + .bss : { *(.bss) } + _end = .; +} + diff --git a/vendor/riscv-isa-sim/arch_test_target/spike/model_test.h b/vendor/riscv-isa-sim/arch_test_target/spike/model_test.h new file mode 100644 index 00000000..46a66017 --- /dev/null +++ b/vendor/riscv-isa-sim/arch_test_target/spike/model_test.h @@ -0,0 +1,70 @@ +#include "ibex_macros.h" + +#ifndef _COMPLIANCE_MODEL_H +#define _COMPLIANCE_MODEL_H + +#if XLEN == 64 + #define ALIGNMENT 3 +#else + #define ALIGNMENT 2 +#endif + +#define RVMODEL_DATA_SECTION \ + .pushsection .tohost,"aw",@progbits; \ + .align 8; .global tohost; tohost: .dword 0; \ + .align 8; .global fromhost; fromhost: .dword 0; \ + .popsection; \ + .align 8; .global begin_regstate; begin_regstate: \ + .word 128; \ + .align 8; .global end_regstate; end_regstate: \ + .word 4; + +//RV_COMPLIANCE_HALT +#define RVMODEL_HALT \ + fence; \ + li x2, SIGNATURE_ADDR; \ + li x1, (FINISHED_IRQ << 8) | CORE_STATUS; \ + sw x1, 0(x2); \ + li x1, (TEST_PASS << 8) | TEST_RESULT; \ + sw x1, 0(x2); \ + self_loop: j self_loop; + +#define RVMODEL_BOOT + +//RV_COMPLIANCE_DATA_BEGIN +#define RVMODEL_DATA_BEGIN \ + .align 4; .global begin_signature; begin_signature: + +//RV_COMPLIANCE_DATA_END +#define RVMODEL_DATA_END \ + .align 4; .global end_signature; end_signature: \ + RVMODEL_DATA_SECTION \ + +//RVTEST_IO_INIT +#define RVMODEL_IO_INIT +//RVTEST_IO_WRITE_STR +#define RVMODEL_IO_WRITE_STR(_R, _STR) +//RVTEST_IO_CHECK +#define RVMODEL_IO_CHECK() +//RVTEST_IO_ASSERT_GPR_EQ +#define RVMODEL_IO_ASSERT_GPR_EQ(_S, _R, _I) +//RVTEST_IO_ASSERT_SFPR_EQ +#define RVMODEL_IO_ASSERT_SFPR_EQ(_F, _R, _I) +//RVTEST_IO_ASSERT_DFPR_EQ +#define RVMODEL_IO_ASSERT_DFPR_EQ(_D, _R, _I) + +#define RVMODEL_SET_MSW_INT \ + li t1, 1; \ + li t2, 0x2000000; \ + sw t1, 0(t2); + +#define RVMODEL_CLEAR_MSW_INT \ + li t2, 0x2000000; \ + sw x0, 0(t2); + +#define RVMODEL_CLEAR_MTIMER_INT + +#define RVMODEL_CLEAR_MEXT_INT + +#endif // _COMPLIANCE_MODEL_H + diff --git a/vendor/riscv-isa-sim/ax_append_flag.m4 b/vendor/riscv-isa-sim/ax_append_flag.m4 new file mode 100644 index 00000000..dd6d8b61 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_append_flag.m4 @@ -0,0 +1,50 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
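# Illustrative usage sketch (an assumption, not part of the upstream macro
# text): following the SYNOPSIS above, a configure.ac could append a flag to
# CXXFLAGS only if it is not already present, e.g.
#
#   AX_APPEND_FLAG([-fPIC], [CXXFLAGS])
#
# If the second argument is omitted, the current language's flags variable
# (e.g. CFLAGS) is used, as stated in the DESCRIPTION above.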
+ +#serial 8 + +AC_DEFUN([AX_APPEND_FLAG], +[dnl +AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_SET_IF +AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])]) +AS_VAR_SET_IF(FLAGS,[ + AS_CASE([" AS_VAR_GET(FLAGS) "], + [*" $1 "*], [AC_RUN_LOG([: FLAGS already contains $1])], + [ + AS_VAR_APPEND(FLAGS,[" $1"]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) + ], + [ + AS_VAR_SET(FLAGS,[$1]) + AC_RUN_LOG([: FLAGS="$FLAGS"]) + ]) +AS_VAR_POPDEF([FLAGS])dnl +])dnl AX_APPEND_FLAG diff --git a/vendor/riscv-isa-sim/ax_append_link_flags.m4 b/vendor/riscv-isa-sim/ax_append_link_flags.m4 new file mode 100644 index 00000000..99b9fa5b --- /dev/null +++ b/vendor/riscv-isa-sim/ax_append_link_flags.m4 @@ -0,0 +1,44 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_link_flags.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_LINK_FLAGS([FLAG1 FLAG2 ...], [FLAGS-VARIABLE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# For every FLAG1, FLAG2 it is checked whether the linker works with the +# flag. If it does, the flag is added FLAGS-VARIABLE +# +# If FLAGS-VARIABLE is not specified, the linker's flags (LDFLAGS) is +# used. During the check the flag is always added to the linker's flags. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: This macro depends on the AX_APPEND_FLAG and AX_CHECK_LINK_FLAG. +# Please keep this macro in sync with AX_APPEND_COMPILE_FLAGS. +# +# LICENSE +# +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 7 + +AC_DEFUN([AX_APPEND_LINK_FLAGS], +[AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +AX_REQUIRE_DEFINED([AX_APPEND_FLAG]) +for flag in $1; do + AX_CHECK_LINK_FLAG([$flag], [AX_APPEND_FLAG([$flag], [m4_default([$2], [LDFLAGS])])], [], [$3], [$4]) +done +])dnl AX_APPEND_LINK_FLAGS diff --git a/vendor/riscv-isa-sim/ax_boost_asio.m4 b/vendor/riscv-isa-sim/ax_boost_asio.m4 new file mode 100644 index 00000000..4247b33c --- /dev/null +++ b/vendor/riscv-isa-sim/ax_boost_asio.m4 @@ -0,0 +1,110 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_asio.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_ASIO +# +# DESCRIPTION +# +# Test for Asio library from the Boost C++ libraries. The macro requires a +# preceding call to AX_BOOST_BASE. Further documentation is available at +# . +# +# This macro calls: +# +# AC_SUBST(BOOST_ASIO_LIB) +# +# And sets: +# +# HAVE_BOOST_ASIO +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2008 Pete Greenwell +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
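#
# EXAMPLE (editor's illustration; not part of the upstream macro, and the
# Boost version and action arguments are hypothetical): since the macro
# requires a preceding AX_BOOST_BASE call, a configure.ac would typically do
#
#   AX_BOOST_BASE([1.53], , [AC_MSG_ERROR([could not find a usable Boost])])
#   AX_BOOST_ASIO
#   LIBS="$BOOST_ASIO_LIB $LIBS"
#
# with $BOOST_CPPFLAGS and $BOOST_LDFLAGS added to the preprocessor and
# linker flags when compiling the sources that use Asio.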
+ +#serial 18 + +AC_DEFUN([AX_BOOST_ASIO], +[ + AC_ARG_WITH([boost-asio], + AS_HELP_STRING([--with-boost-asio@<:@=special-lib@:>@], + [use the ASIO library from boost - it is possible to specify a certain library for the linker + e.g. --with-boost-asio=boost_system-gcc41-mt-1_34 ]), + [ + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ax_boost_user_asio_lib="" + else + want_boost="yes" + ax_boost_user_asio_lib="$withval" + fi + ], + [want_boost="yes"] + ) + + if test "x$want_boost" = "xyes"; then + AC_REQUIRE([AC_PROG_CC]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_CACHE_CHECK(whether the Boost::ASIO library is available, + ax_cv_boost_asio, + [AC_LANG_PUSH([C++]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ @%:@include + ]], + [[ + + boost::asio::io_service io; + boost::system::error_code timer_result; + boost::asio::deadline_timer t(io); + t.cancel(); + io.run_one(); + return 0; + ]])], + ax_cv_boost_asio=yes, ax_cv_boost_asio=no) + AC_LANG_POP([C++]) + ]) + if test "x$ax_cv_boost_asio" = "xyes"; then + AC_DEFINE(HAVE_BOOST_ASIO,,[define if the Boost::ASIO library is available]) + BN=boost_system + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` + if test "x$ax_boost_user_asio_lib" = "x"; then + for ax_lib in `ls $BOOSTLIBDIR/libboost_system*.so* $BOOSTLIBDIR/libboost_system*.dylib* $BOOSTLIBDIR/libboost_system*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_system.*\)\.so.*$;\1;' -e 's;^lib\(boost_system.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_system.*\)\.a.*$;\1;' ` ; do + AC_CHECK_LIB($ax_lib, main, [BOOST_ASIO_LIB="-l$ax_lib" AC_SUBST(BOOST_ASIO_LIB) link_thread="yes" break], + [link_thread="no"]) + done + else + for ax_lib in $ax_boost_user_asio_lib $BN-$ax_boost_user_asio_lib; do + AC_CHECK_LIB($ax_lib, main, + [BOOST_ASIO_LIB="-l$ax_lib" AC_SUBST(BOOST_ASIO_LIB) link_asio="yes" break], + [link_asio="no"]) + done + + fi + if test "x$ax_lib" = "x"; then + AC_MSG_ERROR(Could not find a version of the Boost::Asio library!) + fi + if test "x$link_asio" = "xno"; then + AC_MSG_ERROR(Could not link against $ax_lib !) + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi +]) diff --git a/vendor/riscv-isa-sim/ax_boost_base.m4 b/vendor/riscv-isa-sim/ax_boost_base.m4 new file mode 100644 index 00000000..519f1c9d --- /dev/null +++ b/vendor/riscv-isa-sim/ax_boost_base.m4 @@ -0,0 +1,303 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_base.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# DESCRIPTION +# +# Test for the Boost C++ libraries of a particular version (or newer) +# +# If no path to the installed boost library is given the macro searchs +# under /usr, /usr/local, /opt and /opt/local and evaluates the +# $BOOST_ROOT environment variable. Further documentation is available at +# . 
+# +# This macro calls: +# +# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS) +# +# And sets: +# +# HAVE_BOOST +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2009 Peter Adolphs +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 49 + +# example boost program (need to pass version) +m4_define([_AX_BOOST_BASE_PROGRAM], + [AC_LANG_PROGRAM([[ +#include +]],[[ +(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($1))])); +]])]) + +AC_DEFUN([AX_BOOST_BASE], +[ +AC_ARG_WITH([boost], + [AS_HELP_STRING([--with-boost@<:@=ARG@:>@], + [use Boost library from a standard location (ARG=yes), + from the specified location (ARG=), + or disable it (ARG=no) + @<:@ARG=yes@:>@ ])], + [ + AS_CASE([$withval], + [no],[want_boost="no";_AX_BOOST_BASE_boost_path=""], + [yes],[want_boost="yes";_AX_BOOST_BASE_boost_path=""], + [want_boost="yes";_AX_BOOST_BASE_boost_path="$withval"]) + ], + [want_boost="yes"]) + + +AC_ARG_WITH([boost-libdir], + [AS_HELP_STRING([--with-boost-libdir=LIB_DIR], + [Force given directory for boost libraries. + Note that this will override library path detection, + so use this parameter only if default library detection fails + and you know exactly where your boost libraries are located.])], + [ + AS_IF([test -d "$withval"], + [_AX_BOOST_BASE_boost_lib_path="$withval"], + [AC_MSG_ERROR([--with-boost-libdir expected directory name])]) + ], + [_AX_BOOST_BASE_boost_lib_path=""]) + +BOOST_LDFLAGS="" +BOOST_CPPFLAGS="" +AS_IF([test "x$want_boost" = "xyes"], + [_AX_BOOST_BASE_RUNDETECT([$1],[$2],[$3])]) +AC_SUBST(BOOST_CPPFLAGS) +AC_SUBST(BOOST_LDFLAGS) +]) + + +# convert a version string in $2 to numeric and affect to polymorphic var $1 +AC_DEFUN([_AX_BOOST_BASE_TONUMERICVERSION],[ + AS_IF([test "x$2" = "x"],[_AX_BOOST_BASE_TONUMERICVERSION_req="1.20.0"],[_AX_BOOST_BASE_TONUMERICVERSION_req="$2"]) + _AX_BOOST_BASE_TONUMERICVERSION_req_shorten=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\.[[0-9]]*\)'` + _AX_BOOST_BASE_TONUMERICVERSION_req_major=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([[0-9]]*\)'` + AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_major" = "x"], + [AC_MSG_ERROR([You should at least specify libboost major version])]) + _AX_BOOST_BASE_TONUMERICVERSION_req_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.\([[0-9]]*\)'` + AS_IF([test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_minor" = "x"], + [_AX_BOOST_BASE_TONUMERICVERSION_req_minor="0"]) + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` + AS_IF([test "X$_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor" = "X"], + [_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor="0"]) + _AX_BOOST_BASE_TONUMERICVERSION_RET=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req_major \* 100000 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_minor \* 100 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor` + AS_VAR_SET($1,$_AX_BOOST_BASE_TONUMERICVERSION_RET) +]) + +dnl Run the detection of boost should be run only if $want_boost +AC_DEFUN([_AX_BOOST_BASE_RUNDETECT],[ + _AX_BOOST_BASE_TONUMERICVERSION(WANT_BOOST_VERSION,[$1]) + succeeded=no + + + AC_REQUIRE([AC_CANONICAL_HOST]) + dnl On 64-bit systems check for system libraries in both lib64 and lib. + dnl The former is specified by FHS, but e.g. 
Debian does not adhere to + dnl this (as it rises problems for generic multi-arch support). + dnl The last entry in the list is chosen by default when no libraries + dnl are found, e.g. when only header-only libraries are installed! + AS_CASE([${host_cpu}], + [x86_64],[libsubdirs="lib64 libx32 lib lib64"], + [mips*64*],[libsubdirs="lib64 lib32 lib lib64"], + [ppc64|powerpc64|s390x|sparc64|aarch64|ppc64le|powerpc64le|riscv64|e2k],[libsubdirs="lib64 lib lib64"], + [libsubdirs="lib"] + ) + + dnl allow for real multi-arch paths e.g. /usr/lib/x86_64-linux-gnu. Give + dnl them priority over the other paths since, if libs are found there, they + dnl are almost assuredly the ones desired. + AS_CASE([${host_cpu}], + [i?86],[multiarch_libsubdir="lib/i386-${host_os}"], + [armv7l],[multiarch_libsubdir="lib/arm-${host_os}"], + [multiarch_libsubdir="lib/${host_cpu}-${host_os}"] + ) + + dnl first we check the system location for boost libraries + dnl this location ist chosen if boost libraries are installed with the --layout=system option + dnl or if you install boost with RPM + AS_IF([test "x$_AX_BOOST_BASE_boost_path" != "x"],[ + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) includes in "$_AX_BOOST_BASE_boost_path/include"]) + AS_IF([test -d "$_AX_BOOST_BASE_boost_path/include" && test -r "$_AX_BOOST_BASE_boost_path/include"],[ + AC_MSG_RESULT([yes]) + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include" + for _AX_BOOST_BASE_boost_path_tmp in $multiarch_libsubdir $libsubdirs; do + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION) lib path in "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"]) + AS_IF([test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ],[ + AC_MSG_RESULT([yes]) + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"; + break; + ], + [AC_MSG_RESULT([no])]) + done],[ + AC_MSG_RESULT([no])]) + ],[ + if test X"$cross_compiling" = Xyes; then + search_libsubdirs=$multiarch_libsubdir + else + search_libsubdirs="$multiarch_libsubdir $libsubdirs" + fi + for _AX_BOOST_BASE_boost_path_tmp in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path_tmp/include/boost" && test -r "$_AX_BOOST_BASE_boost_path_tmp/include/boost" ; then + for libsubdir in $search_libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include" + break; + fi + done + ]) + + dnl overwrite ld flags if we have required special directory with + dnl --with-boost-libdir parameter + AS_IF([test "x$_AX_BOOST_BASE_boost_lib_path" != "x"], + [BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path"]) + + AC_MSG_CHECKING([for boostlib >= $1 ($WANT_BOOST_VERSION)]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_REQUIRE([AC_PROG_CXX]) + AC_LANG_PUSH(C++) + AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[ + AC_MSG_RESULT(yes) + succeeded=yes + found_system=yes + ],[ + ]) + AC_LANG_POP([C++]) + + + + dnl if we found no boost with system layout we search for boost libraries + dnl built and installed without the --layout=system option or for a staged(not installed) version + if test "x$succeeded" != "xyes" ; then + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + 
BOOST_CPPFLAGS= + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + BOOST_LDFLAGS= + fi + _version=0 + if test -n "$_AX_BOOST_BASE_boost_path" ; then + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "x$V_CHECK" = "x1" ; then + _version=$_version_tmp + fi + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include/boost-$VERSION_UNDERSCORE" + done + dnl if nothing found search for layout used in Windows distributions + if test -z "$BOOST_CPPFLAGS"; then + if test -d "$_AX_BOOST_BASE_boost_path/boost" && test -r "$_AX_BOOST_BASE_boost_path/boost"; then + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path" + fi + fi + dnl if we found something and BOOST_LDFLAGS was unset before + dnl (because "$_AX_BOOST_BASE_boost_lib_path" = ""), set it here. + if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then + for libsubdir in $libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir" + fi + fi + else + if test "x$cross_compiling" != "xyes" ; then + for _AX_BOOST_BASE_boost_path in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path" ; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "x$V_CHECK" = "x1" ; then + _version=$_version_tmp + best_path=$_AX_BOOST_BASE_boost_path + fi + done + fi + done + + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE" + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + for libsubdir in $libsubdirs ; do + if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$best_path/$libsubdir" + fi + fi + + if test -n "$BOOST_ROOT" ; then + for libsubdir in $libsubdirs ; do + if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then + version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'` + stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'` + stage_version_shorten=`expr $stage_version : '\([[0-9]]*\.[[0-9]]*\)'` + V_CHECK=`expr $stage_version_shorten \>\= $_version` + if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + AC_MSG_NOTICE(We will use a staged boost library from $BOOST_ROOT) + BOOST_CPPFLAGS="-I$BOOST_ROOT" + BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir" + fi + fi + fi + fi + + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_LANG_PUSH(C++) + AC_COMPILE_IFELSE([_AX_BOOST_BASE_PROGRAM($WANT_BOOST_VERSION)],[ + AC_MSG_RESULT(yes) + succeeded=yes + found_system=yes + ],[ + ]) + AC_LANG_POP([C++]) + fi + + if test "x$succeeded" != "xyes" ; then + if test "x$_version" = "x0" ; then + AC_MSG_NOTICE([[We could not detect the boost libraries (version $1 or higher). 
If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation.]]) + else + AC_MSG_NOTICE([Your boost libraries seems to old (version $_version).]) + fi + # execute ACTION-IF-NOT-FOUND (if present): + ifelse([$3], , :, [$3]) + else + AC_DEFINE(HAVE_BOOST,,[define if the Boost library is available]) + # execute ACTION-IF-FOUND (if present): + ifelse([$2], , :, [$2]) + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + +]) diff --git a/vendor/riscv-isa-sim/ax_boost_regex.m4 b/vendor/riscv-isa-sim/ax_boost_regex.m4 new file mode 100644 index 00000000..e2413c24 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_boost_regex.m4 @@ -0,0 +1,111 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_regex.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_REGEX +# +# DESCRIPTION +# +# Test for Regex library from the Boost C++ libraries. The macro requires +# a preceding call to AX_BOOST_BASE. Further documentation is available at +# . +# +# This macro calls: +# +# AC_SUBST(BOOST_REGEX_LIB) +# +# And sets: +# +# HAVE_BOOST_REGEX +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2008 Michael Tindal +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 23 + +AC_DEFUN([AX_BOOST_REGEX], +[ + AC_ARG_WITH([boost-regex], + AS_HELP_STRING([--with-boost-regex@<:@=special-lib@:>@], + [use the Regex library from boost - it is possible to specify a certain library for the linker + e.g. 
--with-boost-regex=boost_regex-gcc-mt-d-1_33_1 ]), + [ + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ax_boost_user_regex_lib="" + else + want_boost="yes" + ax_boost_user_regex_lib="$withval" + fi + ], + [want_boost="yes"] + ) + + if test "x$want_boost" = "xyes"; then + AC_REQUIRE([AC_PROG_CC]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_CACHE_CHECK(whether the Boost::Regex library is available, + ax_cv_boost_regex, + [AC_LANG_PUSH([C++]) + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include + ]], + [[boost::regex r(); return 0;]])], + ax_cv_boost_regex=yes, ax_cv_boost_regex=no) + AC_LANG_POP([C++]) + ]) + if test "x$ax_cv_boost_regex" = "xyes"; then + AC_DEFINE(HAVE_BOOST_REGEX,,[define if the Boost::Regex library is available]) + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` + if test "x$ax_boost_user_regex_lib" = "x"; then + for libextension in `ls $BOOSTLIBDIR/libboost_regex*.so* $BOOSTLIBDIR/libboost_regex*.dylib* $BOOSTLIBDIR/libboost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_regex.*\)\.so.*$;\1;' -e 's;^lib\(boost_regex.*\)\.dylib.*;\1;' -e 's;^lib\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_REGEX_LIB="-l$ax_lib"; AC_SUBST(BOOST_REGEX_LIB) link_regex="yes"; break], + [link_regex="no"]) + done + if test "x$link_regex" != "xyes"; then + for libextension in `ls $BOOSTLIBDIR/boost_regex*.dll* $BOOSTLIBDIR/boost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_regex.*\)\.dll.*$;\1;' -e 's;^\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_REGEX_LIB="-l$ax_lib"; AC_SUBST(BOOST_REGEX_LIB) link_regex="yes"; break], + [link_regex="no"]) + done + fi + + else + for ax_lib in $ax_boost_user_regex_lib boost_regex-$ax_boost_user_regex_lib; do + AC_CHECK_LIB($ax_lib, main, + [BOOST_REGEX_LIB="-l$ax_lib"; AC_SUBST(BOOST_REGEX_LIB) link_regex="yes"; break], + [link_regex="no"]) + done + fi + if test "x$ax_lib" = "x"; then + AC_MSG_ERROR(Could not find a version of the Boost::Regex library!) + fi + if test "x$link_regex" != "xyes"; then + AC_MSG_ERROR(Could not link against $ax_lib !) + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi +]) diff --git a/vendor/riscv-isa-sim/ax_check_compile_flag.m4 b/vendor/riscv-isa-sim/ax_check_compile_flag.m4 new file mode 100644 index 00000000..bd753b34 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_check_compile_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. 
+# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/vendor/riscv-isa-sim/ax_check_link_flag.m4 b/vendor/riscv-isa-sim/ax_check_link_flag.m4 new file mode 100644 index 00000000..03a30ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_check_link_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_link_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_LINK_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the linker or gives an error. +# (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_LINK_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,COMPILE}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
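#
# EXAMPLE (editor's illustration; not part of the upstream macro, and the
# flags shown are hypothetical): together with AX_CHECK_COMPILE_FLAG above,
# a configure.ac can keep only the flags the toolchain actually accepts, e.g.
#
#   AX_CHECK_COMPILE_FLAG([-fno-plt], [CFLAGS="$CFLAGS -fno-plt"])
#   AX_CHECK_LINK_FLAG([-Wl,--gc-sections],
#                      [AX_APPEND_FLAG([-Wl,--gc-sections], [LDFLAGS])])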
+ +#serial 6 + +AC_DEFUN([AX_CHECK_LINK_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_ldflags_$4_$1])dnl +AC_CACHE_CHECK([whether the linker accepts $1], CACHEVAR, [ + ax_check_save_flags=$LDFLAGS + LDFLAGS="$LDFLAGS $4 $1" + AC_LINK_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + LDFLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_LINK_FLAGS diff --git a/vendor/riscv-isa-sim/ax_require_defined.m4 b/vendor/riscv-isa-sim/ax_require_defined.m4 new file mode 100644 index 00000000..17c3eab7 --- /dev/null +++ b/vendor/riscv-isa-sim/ax_require_defined.m4 @@ -0,0 +1,37 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + +AC_DEFUN([AX_REQUIRE_DEFINED], [dnl + m4_ifndef([$1], [m4_fatal([macro ]$1[ is not defined; is a m4 file missing?])]) +])dnl AX_REQUIRE_DEFINED diff --git a/vendor/riscv-isa-sim/ci-tests/test-spike b/vendor/riscv-isa-sim/ci-tests/test-spike new file mode 100755 index 00000000..3d5ed6d7 --- /dev/null +++ b/vendor/riscv-isa-sim/ci-tests/test-spike @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +mkdir build +cd build +mkdir install +$DIR/../configure --prefix=`pwd`/install +make -j4 +make install diff --git a/vendor/riscv-isa-sim/config.h.in b/vendor/riscv-isa-sim/config.h.in new file mode 100644 index 00000000..46d8c00b --- /dev/null +++ b/vendor/riscv-isa-sim/config.h.in @@ -0,0 +1,142 @@ +/* config.h.in. Generated from configure.ac by autoheader. 
*/ + +/* Define if building universal (internal helper macro) */ +#undef AC_APPLE_UNIVERSAL_BUILD + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef CUSTOMEXT_ENABLED + +/* Default value for --isa switch */ +#undef DEFAULT_ISA + +/* Default value for --priv switch */ +#undef DEFAULT_PRIV + +/* Default value for --varch switch */ +#undef DEFAULT_VARCH + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef DISASM_ENABLED + +/* Executable name of device-tree-compiler */ +#undef DTC + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef FDT_ENABLED + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef FESVR_ENABLED + +/* define if the Boost library is available */ +#undef HAVE_BOOST + +/* define if the Boost::ASIO library is available */ +#undef HAVE_BOOST_ASIO + +/* Dynamic library loading is supported */ +#undef HAVE_DLOPEN + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `boost_regex' library (-lboost_regex). */ +#undef HAVE_LIBBOOST_REGEX + +/* Define to 1 if you have the `boost_system' library (-lboost_system). */ +#undef HAVE_LIBBOOST_SYSTEM + +/* Define to 1 if you have the `pthread' library (-lpthread). */ +#undef HAVE_LIBPTHREAD + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if struct statx exists. */ +#undef HAVE_STATX + +/* Define to 1 if struct statx has stx_mnt_id. */ +#undef HAVE_STATX_MNT_ID + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef RISCV_ENABLED + +/* Enable commit log generation */ +#undef RISCV_ENABLE_COMMITLOG + +/* Enable hardware management of PTE accessed and dirty bits */ +#undef RISCV_ENABLE_DIRTY + +/* Enable support for running target in either endianness */ +#undef RISCV_ENABLE_DUAL_ENDIAN + +/* Enable PC histogram generation */ +#undef RISCV_ENABLE_HISTOGRAM + +/* Enable hardware support for misaligned loads and stores */ +#undef RISCV_ENABLE_MISALIGNED + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef SOFTFLOAT_ENABLED + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef SPIKE_DASM_ENABLED + +/* Define if subproject MCPPBS_SPROJ_NORM is enabled */ +#undef SPIKE_MAIN_ENABLED + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Default value for --with-target switch */ +#undef TARGET_ARCH + +/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most + significant byte first (like Motorola and SPARC, unlike Intel). 
*/ +#if defined AC_APPLE_UNIVERSAL_BUILD +# if defined __BIG_ENDIAN__ +# define WORDS_BIGENDIAN 1 +# endif +#else +# ifndef WORDS_BIGENDIAN +# undef WORDS_BIGENDIAN +# endif +#endif diff --git a/vendor/riscv-isa-sim/configure b/vendor/riscv-isa-sim/configure new file mode 100755 index 00000000..0af582f3 --- /dev/null +++ b/vendor/riscv-isa-sim/configure @@ -0,0 +1,7714 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for RISC-V ISA Simulator ?. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. + if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. 
+fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and Andrew Waterman +$0: about your system, including any error possibly output +$0: before this message. Then install a modern shell, or +$0: manually run the script under such a shell if you do +$0: have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. 
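# (Editor's note, illustrative only: the as_fn_* helpers defined in this
#  section replace shell features that are not portable to every /bin/sh.
#  For example, later option parsing reports bad input via
#      as_fn_error $? "invalid feature name: $ac_useropt"
#  which prints "configure: error: ..." and exits with a non-zero status by
#  way of as_fn_exit and the as_fn_set_status function defined below.)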
+as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # If we had to re-execute with $CONFIG_SHELL, we're ensured to have + # already done that, so ensure we don't try to do so again and fall + # in an infinite loop. This has already happened in practice. + _as_can_reexec=no; export _as_can_reexec + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +test -n "$DJDIR" || exec 7<&0 &1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. 
+PACKAGE_NAME='RISC-V ISA Simulator' +PACKAGE_TARNAME='spike' +PACKAGE_VERSION='?' +PACKAGE_STRING='RISC-V ISA Simulator ?' +PACKAGE_BUGREPORT='Andrew Waterman' +PACKAGE_URL='' + +ac_unique_file="riscv/common.h" +# Factoring default headers for most tests. +ac_includes_default="\ +#include +#ifdef HAVE_SYS_TYPES_H +# include +#endif +#ifdef HAVE_SYS_STAT_H +# include +#endif +#ifdef STDC_HEADERS +# include +# include +#else +# ifdef HAVE_STDLIB_H +# include +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include +# endif +# include +#endif +#ifdef HAVE_STRINGS_H +# include +#endif +#ifdef HAVE_INTTYPES_H +# include +#endif +#ifdef HAVE_STDINT_H +# include +#endif +#ifdef HAVE_UNISTD_H +# include +#endif" + +ac_subst_vars='LTLIBOBJS +LIBOBJS +subprojects_enabled +subprojects +HAVE_DLOPEN +BOOST_REGEX_LIB +BOOST_ASIO_LIB +BOOST_LDFLAGS +BOOST_CPPFLAGS +HAVE_CLANG_PCH +HAVE_INT128 +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +STOW_PREFIX +STOW_ROOT +enable_stow +EGREP +GREP +CXXCPP +DTC +RANLIB +AR +ac_ct_CXX +CXXFLAGS +CXX +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +runstatedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +enable_stow +enable_optional_subprojects +with_boost +with_boost_libdir +with_boost_asio +with_boost_regex +with_isa +with_priv +with_varch +with_target +enable_commitlog +enable_histogram +enable_dirty +enable_misaligned +enable_dual_endian +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CXX +CXXFLAGS +CCC +CXXCPP +STOW_ROOT +STOW_PREFIX' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.) 
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + 
ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. 
+case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures RISC-V ISA Simulator ? to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/spike] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of RISC-V ISA Simulator ?:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-stow Enable stow-based install + --enable-optional-subprojects + Enable all optional subprojects + --enable-commitlog Enable commit log generation + --enable-histogram Enable PC histogram generation + --enable-dirty Enable hardware management of PTE accessed and dirty + bits + --enable-misaligned Enable hardware support for misaligned loads and + stores + --enable-dual-endian Enable support for running target in either + endianness + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-boost[=ARG] use Boost library from a standard location + (ARG=yes), from the specified location (ARG=), + or disable it (ARG=no) [ARG=yes] + --with-boost-libdir=LIB_DIR + Force given directory for boost libraries. Note that + this will override library path detection, so use + this parameter only if default library detection + fails and you know exactly where your boost + libraries are located. + --with-boost-asio[=special-lib] + use the ASIO library from boost - it is possible to + specify a certain library for the linker e.g. + --with-boost-asio=boost_system-gcc41-mt-1_34 + --with-boost-regex[=special-lib] + use the Regex library from boost - it is possible to + specify a certain library for the linker e.g. + --with-boost-regex=boost_regex-gcc-mt-d-1_33_1 + --with-isa=RV64IMAFDC Sets the default RISC-V ISA + --with-priv=MSU Sets the default RISC-V privilege modes supported + --with-varch=vlen:128,elen:64 + Sets the default vector config + --with-target=riscv64-unknown-elf + Sets the default target config + +Some influential environment variables: + CC C compiler command + CFLAGS C compiler flags + LDFLAGS linker flags, e.g. 
-L if you have libraries in a + nonstandard directory + LIBS libraries to pass to the linker, e.g. -l + CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if + you have headers in a nonstandard directory + CXX C++ compiler command + CXXFLAGS C++ compiler flags + CXXCPP C++ preprocessor + STOW_ROOT Root for non-native stow-based installs + STOW_PREFIX Prefix for stow-based installs + +Use these variables to override the choices made by `configure' or to help +it to find libraries and programs with nonstandard names/locations. + +Report bugs to . +_ACEOF +ac_status=$? +fi + +if test "$ac_init_help" = "recursive"; then + # If there are subdirs, report their specific --help. + for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +RISC-V ISA Simulator configure ? +generated by GNU Autoconf 2.69 + +Copyright (C) 2012 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? 
+ if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_cxx_try_run LINENO +# ------------------------ +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_cxx_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then : + ac_retval=0 +else + $as_echo "$as_me: program exited with status $ac_status" >&5 + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=$ac_status +fi + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_run + +# ac_fn_cxx_try_cpp LINENO +# ------------------------ +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_cpp + +# ac_fn_cxx_check_header_compile LINENO HEADER VAR INCLUDES +# --------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_cxx_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_header_compile + +# ac_fn_cxx_check_type LINENO TYPE VAR INCLUDES +# --------------------------------------------- +# Tests whether TYPE exists after having included INCLUDES, setting cache +# variable VAR accordingly. +ac_fn_cxx_check_type () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=no" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof ($2)) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +int +main () +{ +if (sizeof (($2))) + return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + eval "$3=yes" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_type + +# ac_fn_cxx_try_link LINENO +# ------------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_cxx_check_member LINENO AGGR MEMBER VAR INCLUDES +# ------------------------------------------------------ +# Tries to find if the field MEMBER exists in type AGGR, after including +# INCLUDES, setting cache variable VAR accordingly. +ac_fn_cxx_check_member () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2.$3" >&5 +$as_echo_n "checking for $2.$3... " >&6; } +if eval \${$4+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$5 +int +main () +{ +static $2 ac_aggr; +if (ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$4=yes" +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$5 +int +main () +{ +static $2 ac_aggr; +if (sizeof ac_aggr.$3) +return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + eval "$4=yes" +else + eval "$4=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$4 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_member +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by RISC-V ISA Simulator $as_me ?, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. 
## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. 
## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. 
+ case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. 
+ if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +ac_aux_dir= +for ac_dir in scripts "$srcdir"/scripts; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in scripts \"$srcdir\"/scripts" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_require_defined.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_REQUIRE_DEFINED(MACRO) +# +# DESCRIPTION +# +# AX_REQUIRE_DEFINED is a simple helper for making sure other macros have +# been defined and thus are available for use. This avoids random issues +# where a macro isn't expanded. Instead the configure script emits a +# non-fatal: +# +# ./configure: line 1673: AX_CFLAGS_WARN_ALL: command not found +# +# It's like AC_REQUIRE except it doesn't expand the required macro. +# +# Here's an example: +# +# AX_REQUIRE_DEFINED([AX_CHECK_LINK_FLAG]) +# +# LICENSE +# +# Copyright (c) 2014 Mike Frysinger +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 2 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_FLAG(FLAG, [FLAGS-VARIABLE]) +# +# DESCRIPTION +# +# FLAG is appended to the FLAGS-VARIABLE shell variable, with a space +# added in between. +# +# If FLAGS-VARIABLE is not specified, the current language's flags (e.g. +# CFLAGS) is used. FLAGS-VARIABLE is not changed if it already contains +# FLAG. If FLAGS-VARIABLE is unset in the shell, it is set to exactly +# FLAG. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 8 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_link_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_LINK_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the linker or gives an error. +# (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_LINK_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,COMPILE}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 6 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_append_link_flags.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_APPEND_LINK_FLAGS([FLAG1 FLAG2 ...], [FLAGS-VARIABLE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# For every FLAG1, FLAG2 it is checked whether the linker works with the +# flag. If it does, the flag is added FLAGS-VARIABLE +# +# If FLAGS-VARIABLE is not specified, the linker's flags (LDFLAGS) is +# used. During the check the flag is always added to the linker's flags. +# +# If EXTRA-FLAGS is defined, it is added to the linker's default flags +# when the check is done. 
The check is thus made with the flags: "LDFLAGS +# EXTRA-FLAGS FLAG". This can for example be used to force the linker to +# issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: This macro depends on the AX_APPEND_FLAG and AX_CHECK_LINK_FLAG. +# Please keep this macro in sync with AX_APPEND_COMPILE_FLAGS. +# +# LICENSE +# +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 7 + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_base.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_BASE([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# DESCRIPTION +# +# Test for the Boost C++ libraries of a particular version (or newer) +# +# If no path to the installed boost library is given the macro searchs +# under /usr, /usr/local, /opt and /opt/local and evaluates the +# $BOOST_ROOT environment variable. Further documentation is available at +# . +# +# This macro calls: +# +# AC_SUBST(BOOST_CPPFLAGS) / AC_SUBST(BOOST_LDFLAGS) +# +# And sets: +# +# HAVE_BOOST +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2009 Peter Adolphs +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 49 + +# example boost program (need to pass version) + + + + + +# convert a version string in $2 to numeric and affect to polymorphic var $1 + + + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_asio.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_ASIO +# +# DESCRIPTION +# +# Test for Asio library from the Boost C++ libraries. The macro requires a +# preceding call to AX_BOOST_BASE. Further documentation is available at +# . +# +# This macro calls: +# +# AC_SUBST(BOOST_ASIO_LIB) +# +# And sets: +# +# HAVE_BOOST_ASIO +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2008 Pete Greenwell +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 18 + + + +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_regex.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_REGEX +# +# DESCRIPTION +# +# Test for Regex library from the Boost C++ libraries. The macro requires +# a preceding call to AX_BOOST_BASE. Further documentation is available at +# . 
+# +# This macro calls: +# +# AC_SUBST(BOOST_REGEX_LIB) +# +# And sets: +# +# HAVE_BOOST_REGEX +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg +# Copyright (c) 2008 Michael Tindal +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. + +#serial 23 + + + + +#------------------------------------------------------------------------- +# Checks for programs +#------------------------------------------------------------------------- + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	  break;;
+    * ) break;;
+  esac
+done
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run.  If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+  { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+  if { ac_try='./conftest$ac_cv_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+    cross_compiling=no
+  else
+    if test "$cross_compiling" = maybe; then
+	cross_compiling=yes
+    else
+	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+    fi
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... " >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
 */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+  CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+  if test "$GCC" = yes; then
+    CFLAGS="-g -O2"
+  else
+    CFLAGS="-g"
+  fi
+else
+  if test "$GCC" = yes; then
+    CFLAGS="-O2"
+  else
+    CFLAGS=
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not '\xHH' hex character constants.
+   These don't provoke an error unfortunately, instead are silently treated
+   as 'x'.  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
+   array size at least.  It's necessary to write '\x00'==0 to get something
+   that's true only with -std.  */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ?
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +set dummy ${ac_tool_prefix}ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="${ac_tool_prefix}ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_AR"; then + ac_ct_AR=$AR + # Extract the first word of "ar", so it can be a program name with args. +set dummy ar; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="ar" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_AR" = x; then + AR="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +else + AR="$ac_cv_prog_AR" +fi + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +# Extract the first word of "dtc", so it can be a program name with args. +set dummy dtc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_DTC+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $DTC in + [\\/]* | ?:[\\/]*) + ac_cv_path_DTC="$DTC" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_DTC="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_DTC" && ac_cv_path_DTC="no" + ;; +esac +fi +DTC=$ac_cv_path_DTC +if test -n "$DTC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DTC" >&5 +$as_echo "$DTC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test x"$DTC" == xno; then : + as_fn_error $? "device-tree-compiler not found" "$LINENO" 5 +fi + +cat >>confdefs.h <<_ACEOF +#define DTC "dtc" +_ACEOF + + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. 
Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. 
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... 
" >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_cxx_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +$as_echo_n "checking whether byte ordering is bigendian... " >&6; } +if ${ac_cv_c_bigendian+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_bigendian=unknown + # See if we're dealing with a universal compiler. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + # Check for potential -arch flags. It is not universal unless + # there are at least two -arch flags with different values. 
+	ac_arch=
+	ac_prev=
+	for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
+	 if test -n "$ac_prev"; then
+	   case $ac_word in
+	     i?86 | x86_64 | ppc | ppc64)
+	       if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
+		 ac_arch=$ac_word
+	       else
+		 ac_cv_c_bigendian=universal
+		 break
+	       fi
+	       ;;
+	   esac
+	   ac_prev=
+	 elif test "x$ac_word" = "x-arch"; then
+	   ac_prev=arch
+	 fi
+       done
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    if test $ac_cv_c_bigendian = unknown; then
+      # See if sys/param.h defines the BYTE_ORDER macro.
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <sys/types.h>
+	     #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+		     && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+		     && LITTLE_ENDIAN)
+	      bogus endian macros
+	     #endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  # It does; now see whether it defined to BIG_ENDIAN or not.
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <sys/types.h>
+		#include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+		 not big endian
+		#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_c_bigendian=yes
+else
+  ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    fi
+    if test $ac_cv_c_bigendian = unknown; then
+      # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+	      bogus endian macros
+	     #endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  # It does; now see whether it defined to _BIG_ENDIAN or not.
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+		 not big endian
+		#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_c_bigendian=yes
+else
+  ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    fi
+    if test $ac_cv_c_bigendian = unknown; then
+      # Compile a test program.
+      if test "$cross_compiling" = yes; then :
+  # Try to guess by grepping values from an object file.
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+short int ascii_mm[] =
+		  { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+	short int ascii_ii[] =
+		  { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+	int use_ascii (int i) {
+	  return ascii_mm[i] + ascii_ii[i];
+	}
+	short int ebcdic_ii[] =
+		  { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+	short int ebcdic_mm[] =
+		  { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+	int use_ebcdic (int i) {
+	  return ebcdic_mm[i] + ebcdic_ii[i];
+	}
+	extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+	      ac_cv_c_bigendian=yes
+	    fi
+	    if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+	      if test "$ac_cv_c_bigendian" = unknown; then
+		ac_cv_c_bigendian=no
+	      else
+		# finding both strings is unlikely to happen, but who knows?
+		ac_cv_c_bigendian=unknown
+	      fi
+	    fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.
*/ +$ac_includes_default +int +main () +{ + + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + ac_cv_c_bigendian=no +else + ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +$as_echo "$ac_cv_c_bigendian" >&6; } + case $ac_cv_c_bigendian in #( + yes) + $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h +;; #( + no) + ;; #( + universal) + +$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h + + ;; #( + *) + as_fn_error $? "unknown endianness + presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; + esac + + +#------------------------------------------------------------------------- +# MCPPBS specific program checks +#------------------------------------------------------------------------- +# These macros check to see if we can do a stow-based install and also +# check for an isa simulator suitable for running the unit test programs +# via the makefile. + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. 
+ : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + + + + # Configure command line option + + # Check whether --enable-stow was given. +if test "${enable_stow+set}" = set; then : + enableval=$enable_stow; enable_stow="yes" +else + enable_stow="no" +fi + + + + + # Environment variables + + + + + # Check for install script + + + + +#------------------------------------------------------------------------- +# Checks for header files +#------------------------------------------------------------------------- + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_cxx_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + + +#------------------------------------------------------------------------- +# Checks for type +#------------------------------------------------------------------------- + +ac_fn_cxx_check_type "$LINENO" "__int128_t" "ac_cv_type___int128_t" "$ac_includes_default" +if test "x$ac_cv_type___int128_t" = xyes; then : + HAVE_INT128=yes + +fi + + +#------------------------------------------------------------------------- +# Default compiler flags +#------------------------------------------------------------------------- + + + + + +for flag in -Wl,--export-dynamic; do + as_CACHEVAR=`$as_echo "ax_cv_check_ldflags__$flag" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the linker accepts $flag" >&5 +$as_echo_n "checking whether the linker accepts $flag... " >&6; } +if eval \${$as_CACHEVAR+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$LDFLAGS + LDFLAGS="$LDFLAGS $flag" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_CACHEVAR=yes" +else + eval "$as_CACHEVAR=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$ax_check_save_flags +fi +eval ac_res=\$$as_CACHEVAR + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_CACHEVAR"\" = x"yes"; then : + +if ${LDFLAGS+:} false; then : + + case " $LDFLAGS " in #( + *" $flag "*) : + { { $as_echo "$as_me:${as_lineno-$LINENO}: : LDFLAGS already contains \$flag"; } >&5 + (: LDFLAGS already contains $flag) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } ;; #( + *) : + + as_fn_append LDFLAGS " $flag" + { { $as_echo "$as_me:${as_lineno-$LINENO}: : LDFLAGS=\"\$LDFLAGS\""; } >&5 + (: LDFLAGS="$LDFLAGS") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + ;; +esac + +else + + LDFLAGS=$flag + { { $as_echo "$as_me:${as_lineno-$LINENO}: : LDFLAGS=\"\$LDFLAGS\""; } >&5 + (: LDFLAGS="$LDFLAGS") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + +fi + +else + : +fi + +done + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether C++ compiler accepts -relocatable-pch" >&5 +$as_echo_n "checking whether C++ compiler accepts -relocatable-pch... " >&6; } +if ${ax_cv_check_cxxflags___relocatable_pch+:} false; then : + $as_echo_n "(cached) " >&6 +else + + ax_check_save_flags=$CXXFLAGS + CXXFLAGS="$CXXFLAGS -relocatable-pch" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_check_cxxflags___relocatable_pch=yes +else + ax_cv_check_cxxflags___relocatable_pch=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + CXXFLAGS=$ax_check_save_flags +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_check_cxxflags___relocatable_pch" >&5 +$as_echo "$ax_cv_check_cxxflags___relocatable_pch" >&6; } +if test "x$ax_cv_check_cxxflags___relocatable_pch" = xyes; then : + HAVE_CLANG_PCH=yes + +else + : +fi + + +#------------------------------------------------------------------------- +# MCPPBS subproject list +#------------------------------------------------------------------------- +# Order list so that subprojects only depend on those listed earlier. +# The '*' suffix indicates an optional subproject. The '**' suffix +# indicates an optional subproject which is also the name of a group. + + + + # Add command line argument to enable all optional subprojects + + # Check whether --enable-optional-subprojects was given. +if test "${enable_optional_subprojects+set}" = set; then : + enableval=$enable_optional_subprojects; +fi + + + # Loop through the subprojects given in the macro argument + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects fesvr" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : fesvr" >&5 +$as_echo "$as_me: configuring default subproject : fesvr" >&6;} + ac_config_files="$ac_config_files fesvr.mk:fesvr/fesvr.mk.in" + + enable_fesvr_sproj="yes" + subprojects_enabled="$subprojects_enabled fesvr" + +$as_echo "#define FESVR_ENABLED /**/" >>confdefs.h + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 +$as_echo_n "checking for pthread_create in -lpthread... " >&6; } +if ${ac_cv_lib_pthread_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_pthread_pthread_create=yes +else + ac_cv_lib_pthread_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } +if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPTHREAD 1 +_ACEOF + + LIBS="-lpthread $LIBS" + +else + as_fn_error $? 
"libpthread is required" "$LINENO" 5 +fi + + +ac_fn_cxx_check_member "$LINENO" "struct statx" "stx_ino" "ac_cv_member_struct_statx_stx_ino" "$ac_includes_default" +if test "x$ac_cv_member_struct_statx_stx_ino" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STATX 1 +_ACEOF + +fi + + +ac_fn_cxx_check_member "$LINENO" "struct statx" "stx_mnt_id" "ac_cv_member_struct_statx_stx_mnt_id" "$ac_includes_default" +if test "x$ac_cv_member_struct_statx_stx_mnt_id" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STATX_MNT_ID 1 +_ACEOF + +fi + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects riscv" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : riscv" >&5 +$as_echo "$as_me: configuring default subproject : riscv" >&6;} + ac_config_files="$ac_config_files riscv.mk:riscv/riscv.mk.in" + + enable_riscv_sproj="yes" + subprojects_enabled="$subprojects_enabled riscv" + +$as_echo "#define RISCV_ENABLED /**/" >>confdefs.h + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + +# Check whether --with-boost was given. +if test "${with_boost+set}" = set; then : + withval=$with_boost; + case $withval in #( + no) : + want_boost="no";_AX_BOOST_BASE_boost_path="" ;; #( + yes) : + want_boost="yes";_AX_BOOST_BASE_boost_path="" ;; #( + *) : + want_boost="yes";_AX_BOOST_BASE_boost_path="$withval" ;; +esac + +else + want_boost="yes" +fi + + + + +# Check whether --with-boost-libdir was given. +if test "${with_boost_libdir+set}" = set; then : + withval=$with_boost_libdir; + if test -d "$withval"; then : + _AX_BOOST_BASE_boost_lib_path="$withval" +else + as_fn_error $? "--with-boost-libdir expected directory name" "$LINENO" 5 +fi + +else + _AX_BOOST_BASE_boost_lib_path="" +fi + + +BOOST_LDFLAGS="" +BOOST_CPPFLAGS="" +if test "x$want_boost" = "xyes"; then : + + + if test "x1.53" = "x"; then : + _AX_BOOST_BASE_TONUMERICVERSION_req="1.20.0" +else + _AX_BOOST_BASE_TONUMERICVERSION_req="1.53" +fi + _AX_BOOST_BASE_TONUMERICVERSION_req_shorten=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([0-9]*\.[0-9]*\)'` + _AX_BOOST_BASE_TONUMERICVERSION_req_major=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '\([0-9]*\)'` + if test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_major" = "x"; then : + as_fn_error $? 
"You should at least specify libboost major version" "$LINENO" 5 +fi + _AX_BOOST_BASE_TONUMERICVERSION_req_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[0-9]*\.\([0-9]*\)'` + if test "x$_AX_BOOST_BASE_TONUMERICVERSION_req_minor" = "x"; then : + _AX_BOOST_BASE_TONUMERICVERSION_req_minor="0" +fi + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req : '[0-9]*\.[0-9]*\.\([0-9]*\)'` + if test "X$_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor" = "X"; then : + _AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor="0" +fi + _AX_BOOST_BASE_TONUMERICVERSION_RET=`expr $_AX_BOOST_BASE_TONUMERICVERSION_req_major \* 100000 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_minor \* 100 \+ $_AX_BOOST_BASE_TONUMERICVERSION_req_sub_minor` + WANT_BOOST_VERSION=$_AX_BOOST_BASE_TONUMERICVERSION_RET + + succeeded=no + + + + case ${host_cpu} in #( + x86_64) : + libsubdirs="lib64 libx32 lib lib64" ;; #( + mips*64*) : + libsubdirs="lib64 lib32 lib lib64" ;; #( + ppc64|powerpc64|s390x|sparc64|aarch64|ppc64le|powerpc64le|riscv64|e2k) : + libsubdirs="lib64 lib lib64" ;; #( + *) : + libsubdirs="lib" + ;; +esac + + case ${host_cpu} in #( + i?86) : + multiarch_libsubdir="lib/i386-${host_os}" ;; #( + armv7l) : + multiarch_libsubdir="lib/arm-${host_os}" ;; #( + *) : + multiarch_libsubdir="lib/${host_cpu}-${host_os}" + ;; +esac + + if test "x$_AX_BOOST_BASE_boost_path" != "x"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) includes in \"$_AX_BOOST_BASE_boost_path/include\"" >&5 +$as_echo_n "checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) includes in \"$_AX_BOOST_BASE_boost_path/include\"... " >&6; } + if test -d "$_AX_BOOST_BASE_boost_path/include" && test -r "$_AX_BOOST_BASE_boost_path/include"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include" + for _AX_BOOST_BASE_boost_path_tmp in $multiarch_libsubdir $libsubdirs; do + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) lib path in \"$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp\"" >&5 +$as_echo_n "checking for boostlib >= 1.53 ($WANT_BOOST_VERSION) lib path in \"$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp\"... 
" >&6; } + if test -d "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" && test -r "$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp" ; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$_AX_BOOST_BASE_boost_path_tmp"; + break; + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + done +else + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + +else + + if test X"$cross_compiling" = Xyes; then + search_libsubdirs=$multiarch_libsubdir + else + search_libsubdirs="$multiarch_libsubdir $libsubdirs" + fi + for _AX_BOOST_BASE_boost_path_tmp in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path_tmp/include/boost" && test -r "$_AX_BOOST_BASE_boost_path_tmp/include/boost" ; then + for libsubdir in $search_libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path_tmp/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path_tmp/$libsubdir" + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path_tmp/include" + break; + fi + done + +fi + + if test "x$_AX_BOOST_BASE_boost_lib_path" != "x"; then : + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_lib_path" +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for boostlib >= 1.53 ($WANT_BOOST_VERSION)" >&5 +$as_echo_n "checking for boostlib >= 1.53 ($WANT_BOOST_VERSION)... " >&6; } + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +#include + +int +main () +{ + +(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($WANT_BOOST_VERSION))])); + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + succeeded=yes + found_system=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + + if test "x$succeeded" != "xyes" ; then + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + BOOST_CPPFLAGS= + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + BOOST_LDFLAGS= + fi + _version=0 + if test -n "$_AX_BOOST_BASE_boost_path" ; then + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path"; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "x$V_CHECK" = "x1" ; then + _version=$_version_tmp + fi + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path/include/boost-$VERSION_UNDERSCORE" + done + if test -z "$BOOST_CPPFLAGS"; then + if test -d "$_AX_BOOST_BASE_boost_path/boost" && test -r "$_AX_BOOST_BASE_boost_path/boost"; then + BOOST_CPPFLAGS="-I$_AX_BOOST_BASE_boost_path" + fi + fi + if test -n "$BOOST_CPPFLAGS" && test -z "$BOOST_LDFLAGS"; then + for libsubdir in $libsubdirs ; do + if ls "$_AX_BOOST_BASE_boost_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$_AX_BOOST_BASE_boost_path/$libsubdir" + fi + fi + else + if test "x$cross_compiling" != "xyes" ; then + for _AX_BOOST_BASE_boost_path in /usr /usr/local /opt /opt/local ; do + if test -d "$_AX_BOOST_BASE_boost_path" && test -r "$_AX_BOOST_BASE_boost_path" ; then + for i in `ls -d $_AX_BOOST_BASE_boost_path/include/boost-* 2>/dev/null`; do + _version_tmp=`echo $i | sed "s#$_AX_BOOST_BASE_boost_path##" | sed 's/\/include\/boost-//' | sed 's/_/./'` + V_CHECK=`expr $_version_tmp \> $_version` + if test "x$V_CHECK" = "x1" ; then + _version=$_version_tmp + best_path=$_AX_BOOST_BASE_boost_path + fi + done + fi + done + + VERSION_UNDERSCORE=`echo $_version | sed 's/\./_/'` + BOOST_CPPFLAGS="-I$best_path/include/boost-$VERSION_UNDERSCORE" + if test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + for libsubdir in $libsubdirs ; do + if ls "$best_path/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + BOOST_LDFLAGS="-L$best_path/$libsubdir" + fi + fi + + if test -n "$BOOST_ROOT" ; then + for libsubdir in $libsubdirs ; do + if ls "$BOOST_ROOT/stage/$libsubdir/libboost_"* >/dev/null 2>&1 ; then break; fi + done + if test -d "$BOOST_ROOT" && test -r "$BOOST_ROOT" && test -d "$BOOST_ROOT/stage/$libsubdir" && test -r "$BOOST_ROOT/stage/$libsubdir"; then + version_dir=`expr //$BOOST_ROOT : '.*/\(.*\)'` + stage_version=`echo $version_dir | sed 's/boost_//' | sed 's/_/./g'` + stage_version_shorten=`expr $stage_version : '\([0-9]*\.[0-9]*\)'` + V_CHECK=`expr $stage_version_shorten \>\= $_version` + if test "x$V_CHECK" = "x1" && test -z "$_AX_BOOST_BASE_boost_lib_path" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: We will use a staged boost library from $BOOST_ROOT" >&5 +$as_echo "$as_me: We will use a staged 
boost library from $BOOST_ROOT" >&6;} + BOOST_CPPFLAGS="-I$BOOST_ROOT" + BOOST_LDFLAGS="-L$BOOST_ROOT/stage/$libsubdir" + fi + fi + fi + fi + + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +#include + +int +main () +{ + +(void) ((void)sizeof(char[1 - 2*!!((BOOST_VERSION) < ($WANT_BOOST_VERSION))])); + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + succeeded=yes + found_system=yes + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + fi + + if test "x$succeeded" != "xyes" ; then + if test "x$_version" = "x0" ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: We could not detect the boost libraries (version 1.53 or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation." >&5 +$as_echo "$as_me: We could not detect the boost libraries (version 1.53 or higher). If you have a staged boost library (still not installed) please specify \$BOOST_ROOT in your environment and do not give a PATH to --with-boost option. If you are sure you have boost installed, then check your version number looking in . See http://randspringer.de/boost for more documentation." >&6;} + else + { $as_echo "$as_me:${as_lineno-$LINENO}: Your boost libraries seems to old (version $_version)." >&5 +$as_echo "$as_me: Your boost libraries seems to old (version $_version)." >&6;} + fi + # execute ACTION-IF-NOT-FOUND (if present): + : + else + +$as_echo "#define HAVE_BOOST /**/" >>confdefs.h + + # execute ACTION-IF-FOUND (if present): + : + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + + +fi + + + + + +# Check whether --with-boost-asio was given. +if test "${with_boost_asio+set}" = set; then : + withval=$with_boost_asio; + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ax_boost_user_asio_lib="" + else + want_boost="yes" + ax_boost_user_asio_lib="$withval" + fi + +else + want_boost="yes" + +fi + + + if test "x$want_boost" = "xyes"; then + + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the Boost::ASIO library is available" >&5 +$as_echo_n "checking whether the Boost::ASIO library is available... 
" >&6; } +if ${ax_cv_boost_asio+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + #include + +int +main () +{ + + + boost::asio::io_service io; + boost::system::error_code timer_result; + boost::asio::deadline_timer t(io); + t.cancel(); + io.run_one(); + return 0; + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_boost_asio=yes +else + ax_cv_boost_asio=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_boost_asio" >&5 +$as_echo "$ax_cv_boost_asio" >&6; } + if test "x$ax_cv_boost_asio" = "xyes"; then + +$as_echo "#define HAVE_BOOST_ASIO /**/" >>confdefs.h + + BN=boost_system + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/[^\/]*//'` + if test "x$ax_boost_user_asio_lib" = "x"; then + for ax_lib in `ls $BOOSTLIBDIR/libboost_system*.so* $BOOSTLIBDIR/libboost_system*.dylib* $BOOSTLIBDIR/libboost_system*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_system.*\)\.so.*$;\1;' -e 's;^lib\(boost_system.*\)\.dylib.*$;\1;' -e 's;^lib\(boost_system.*\)\.a.*$;\1;' ` ; do + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$ax_lib" >&5 +$as_echo_n "checking for main in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_ASIO_LIB="-l$ax_lib" link_thread="yes" break +else + link_thread="no" +fi + + done + else + for ax_lib in $ax_boost_user_asio_lib $BN-$ax_boost_user_asio_lib; do + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$ax_lib" >&5 +$as_echo_n "checking for main in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_ASIO_LIB="-l$ax_lib" link_asio="yes" break +else + link_asio="no" +fi + + done + + fi + if test "x$ax_lib" = "x"; then + as_fn_error $? "Could not find a version of the Boost::Asio library!" "$LINENO" 5 + fi + if test "x$link_asio" = "xno"; then + as_fn_error $? "Could not link against $ax_lib !" "$LINENO" 5 + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi + + + +# Check whether --with-boost-regex was given. +if test "${with_boost_regex+set}" = set; then : + withval=$with_boost_regex; + if test "$withval" = "no"; then + want_boost="no" + elif test "$withval" = "yes"; then + want_boost="yes" + ax_boost_user_regex_lib="" + else + want_boost="yes" + ax_boost_user_regex_lib="$withval" + fi + +else + want_boost="yes" + +fi + + + if test "x$want_boost" = "xyes"; then + + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the Boost::Regex library is available" >&5 +$as_echo_n "checking whether the Boost::Regex library is available... " >&6; } +if ${ax_cv_boost_regex+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +int +main () +{ +boost::regex r(); return 0; + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ax_cv_boost_regex=yes +else + ax_cv_boost_regex=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_boost_regex" >&5 +$as_echo "$ax_cv_boost_regex" >&6; } + if test "x$ax_cv_boost_regex" = "xyes"; then + +$as_echo "#define HAVE_BOOST_REGEX /**/" >>confdefs.h + + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/[^\/]*//'` + if test "x$ax_boost_user_regex_lib" = "x"; then + for libextension in `ls $BOOSTLIBDIR/libboost_regex*.so* $BOOSTLIBDIR/libboost_regex*.dylib* $BOOSTLIBDIR/libboost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^lib\(boost_regex.*\)\.so.*$;\1;' -e 's;^lib\(boost_regex.*\)\.dylib.*;\1;' -e 's;^lib\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_exit" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for exit in -l$ax_lib" >&5 +$as_echo_n "checking for exit in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char exit (); +int +main () +{ +return exit (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_REGEX_LIB="-l$ax_lib"; link_regex="yes"; break +else + link_regex="no" +fi + + done + if test "x$link_regex" != "xyes"; then + for libextension in `ls $BOOSTLIBDIR/boost_regex*.dll* $BOOSTLIBDIR/boost_regex*.a* 2>/dev/null | sed 's,.*/,,' | sed -e 's;^\(boost_regex.*\)\.dll.*$;\1;' -e 's;^\(boost_regex.*\)\.a.*$;\1;'` ; do + ax_lib=${libextension} + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_exit" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for exit in -l$ax_lib" >&5 +$as_echo_n "checking for exit in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char exit (); +int +main () +{ +return exit (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_REGEX_LIB="-l$ax_lib"; link_regex="yes"; break +else + link_regex="no" +fi + + done + fi + + else + for ax_lib in $ax_boost_user_regex_lib boost_regex-$ax_boost_user_regex_lib; do + as_ac_Lib=`$as_echo "ac_cv_lib_$ax_lib''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$ax_lib" >&5 +$as_echo_n "checking for main in -l$ax_lib... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$ax_lib $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + BOOST_REGEX_LIB="-l$ax_lib"; link_regex="yes"; break +else + link_regex="no" +fi + + done + fi + if test "x$ax_lib" = "x"; then + as_fn_error $? "Could not find a version of the Boost::Regex library!" "$LINENO" 5 + fi + if test "x$link_regex" != "xyes"; then + as_fn_error $? "Could not link against $ax_lib !" 
"$LINENO" 5 + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lboost_system" >&5 +$as_echo_n "checking for main in -lboost_system... " >&6; } +if ${ac_cv_lib_boost_system_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lboost_system $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_boost_system_main=yes +else + ac_cv_lib_boost_system_main=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_boost_system_main" >&5 +$as_echo "$ac_cv_lib_boost_system_main" >&6; } +if test "x$ac_cv_lib_boost_system_main" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBBOOST_SYSTEM 1 +_ACEOF + + LIBS="-lboost_system $LIBS" + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lboost_regex" >&5 +$as_echo_n "checking for main in -lboost_regex... " >&6; } +if ${ac_cv_lib_boost_regex_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lboost_regex $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_boost_regex_main=yes +else + ac_cv_lib_boost_regex_main=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_boost_regex_main" >&5 +$as_echo "$ac_cv_lib_boost_regex_main" >&6; } +if test "x$ac_cv_lib_boost_regex_main" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBBOOST_REGEX 1 +_ACEOF + + LIBS="-lboost_regex $LIBS" + +fi + + + +# Check whether --with-isa was given. +if test "${with_isa+set}" = set; then : + withval=$with_isa; +cat >>confdefs.h <<_ACEOF +#define DEFAULT_ISA "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define DEFAULT_ISA "RV64IMAFDC" +_ACEOF + +fi + + + +# Check whether --with-priv was given. +if test "${with_priv+set}" = set; then : + withval=$with_priv; +cat >>confdefs.h <<_ACEOF +#define DEFAULT_PRIV "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define DEFAULT_PRIV "MSU" +_ACEOF + +fi + + + +# Check whether --with-varch was given. +if test "${with_varch+set}" = set; then : + withval=$with_varch; +cat >>confdefs.h <<_ACEOF +#define DEFAULT_VARCH "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define DEFAULT_VARCH "vlen:128,elen:64" +_ACEOF + +fi + + + +# Check whether --with-target was given. +if test "${with_target+set}" = set; then : + withval=$with_target; +cat >>confdefs.h <<_ACEOF +#define TARGET_ARCH "$withval" +_ACEOF + +else + +cat >>confdefs.h <<_ACEOF +#define TARGET_ARCH "riscv64-unknown-elf" +_ACEOF + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... " >&6; } +if ${ac_cv_search_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +for ac_lib in '' dl dld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_search_dlopen=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_dlopen+:} false; then : + break +fi +done +if ${ac_cv_search_dlopen+:} false; then : + +else + ac_cv_search_dlopen=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + + +$as_echo "#define HAVE_DLOPEN /**/" >>confdefs.h + + HAVE_DLOPEN=yes + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 +$as_echo_n "checking for pthread_create in -lpthread... " >&6; } +if ${ac_cv_lib_pthread_pthread_create+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpthread $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_create (); +int +main () +{ +return pthread_create (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_pthread_pthread_create=yes +else + ac_cv_lib_pthread_pthread_create=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 +$as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } +if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPTHREAD 1 +_ACEOF + + LIBS="-lpthread $LIBS" + +else + as_fn_error $? "libpthread is required" "$LINENO" 5 +fi + + +# Check whether --enable-commitlog was given. +if test "${enable_commitlog+set}" = set; then : + enableval=$enable_commitlog; +fi + +if test "x$enable_commitlog" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_COMMITLOG /**/" >>confdefs.h + + +fi + +# Check whether --enable-histogram was given. +if test "${enable_histogram+set}" = set; then : + enableval=$enable_histogram; +fi + +if test "x$enable_histogram" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_HISTOGRAM /**/" >>confdefs.h + + +fi + +# Check whether --enable-dirty was given. +if test "${enable_dirty+set}" = set; then : + enableval=$enable_dirty; +fi + +if test "x$enable_dirty" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_DIRTY /**/" >>confdefs.h + + +fi + +# Check whether --enable-misaligned was given. +if test "${enable_misaligned+set}" = set; then : + enableval=$enable_misaligned; +fi + +if test "x$enable_misaligned" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_MISALIGNED /**/" >>confdefs.h + + +fi + +# Check whether --enable-dual-endian was given. 
+if test "${enable_dual_endian+set}" = set; then : + enableval=$enable_dual_endian; +fi + +if test "x$enable_dual_endian" = "xyes"; then : + + +$as_echo "#define RISCV_ENABLE_DUAL_ENDIAN /**/" >>confdefs.h + + +fi + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects disasm" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : disasm" >&5 +$as_echo "$as_me: configuring default subproject : disasm" >&6;} + ac_config_files="$ac_config_files disasm.mk:disasm/disasm.mk.in" + + enable_disasm_sproj="yes" + subprojects_enabled="$subprojects_enabled disasm" + +$as_echo "#define DISASM_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects customext" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : customext" >&5 +$as_echo "$as_me: configuring default subproject : customext" >&6;} + ac_config_files="$ac_config_files customext.mk:customext/customext.mk.in" + + enable_customext_sproj="yes" + subprojects_enabled="$subprojects_enabled customext" + +$as_echo "#define CUSTOMEXT_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects fdt" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : fdt" >&5 +$as_echo "$as_me: configuring default subproject : fdt" >&6;} + ac_config_files="$ac_config_files fdt.mk:fdt/fdt.mk.in" + + enable_fdt_sproj="yes" + subprojects_enabled="$subprojects_enabled fdt" + +$as_echo "#define FDT_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects softfloat" + + # Process the subproject appropriately. 
If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : softfloat" >&5 +$as_echo "$as_me: configuring default subproject : softfloat" >&6;} + ac_config_files="$ac_config_files softfloat.mk:softfloat/softfloat.mk.in" + + enable_softfloat_sproj="yes" + subprojects_enabled="$subprojects_enabled softfloat" + +$as_echo "#define SOFTFLOAT_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects spike_main" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : spike_main" >&5 +$as_echo "$as_me: configuring default subproject : spike_main" >&6;} + ac_config_files="$ac_config_files spike_main.mk:spike_main/spike_main.mk.in" + + enable_spike_main_sproj="yes" + subprojects_enabled="$subprojects_enabled spike_main" + +$as_echo "#define SPIKE_MAIN_ENABLED /**/" >>confdefs.h + + + + + + + # Determine if this is a required or an optional subproject + + + + # Determine if there is a group with the same name + + + + # Create variations of the subproject name suitable for use as a CPP + # enabled define, a shell enabled variable, and a shell function + + + + + + + + + + + + # Add subproject to our running list + + subprojects="$subprojects spike_dasm" + + # Process the subproject appropriately. If enabled add it to the + # $enabled_subprojects running shell variable, set a + # SUBPROJECT_ENABLED C define, and include the appropriate + # 'subproject.ac'. + + + { $as_echo "$as_me:${as_lineno-$LINENO}: configuring default subproject : spike_dasm" >&5 +$as_echo "$as_me: configuring default subproject : spike_dasm" >&6;} + ac_config_files="$ac_config_files spike_dasm.mk:spike_dasm/spike_dasm.mk.in" + + enable_spike_dasm_sproj="yes" + subprojects_enabled="$subprojects_enabled spike_dasm" + +$as_echo "#define SPIKE_DASM_ENABLED /**/" >>confdefs.h + + + + + + + # Output make variables + + + + + + +#------------------------------------------------------------------------- +# MCPPBS subproject groups +#------------------------------------------------------------------------- +# If a group has the same name as a subproject then you must add the +# '**' suffix in the subproject list above. The list of subprojects in a +# group should be ordered so that subprojets only depend on those listed +# earlier. Here is an example: +# +# MCPPBS_GROUP( [group-name], [sproja,sprojb,...] 
) +# + +#------------------------------------------------------------------------- +# Output +#------------------------------------------------------------------------- + +ac_config_headers="$ac_config_headers config.h" + +ac_config_files="$ac_config_files Makefile" + +ac_config_files="$ac_config_files riscv-fesvr.pc" + +ac_config_files="$ac_config_files riscv-disasm.pc" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. 
Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. 
+as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? 
-eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. 
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by RISC-V ISA Simulator $as_me ?, which was +generated by GNU Autoconf 2.69. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +RISC-V ISA Simulator config.status ? +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. 
+ -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "fesvr.mk") CONFIG_FILES="$CONFIG_FILES fesvr.mk:fesvr/fesvr.mk.in" ;; + "riscv.mk") CONFIG_FILES="$CONFIG_FILES riscv.mk:riscv/riscv.mk.in" ;; + "disasm.mk") CONFIG_FILES="$CONFIG_FILES disasm.mk:disasm/disasm.mk.in" ;; + "customext.mk") CONFIG_FILES="$CONFIG_FILES customext.mk:customext/customext.mk.in" ;; + "fdt.mk") CONFIG_FILES="$CONFIG_FILES fdt.mk:fdt/fdt.mk.in" ;; + "softfloat.mk") CONFIG_FILES="$CONFIG_FILES softfloat.mk:softfloat/softfloat.mk.in" ;; + "spike_main.mk") CONFIG_FILES="$CONFIG_FILES spike_main.mk:spike_main/spike_main.mk.in" ;; + "spike_dasm.mk") CONFIG_FILES="$CONFIG_FILES spike_dasm.mk:spike_dasm/spike_dasm.mk.in" ;; + "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "riscv-fesvr.pc") CONFIG_FILES="$CONFIG_FILES riscv-fesvr.pc" ;; + "riscv-disasm.pc") CONFIG_FILES="$CONFIG_FILES riscv-disasm.pc" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. 
+if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. 
+ +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. 
+ case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? 
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi + ;; + + + esac + +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. 
+ $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + diff --git a/vendor/riscv-isa-sim/configure.ac b/vendor/riscv-isa-sim/configure.ac new file mode 100644 index 00000000..13797a0b --- /dev/null +++ b/vendor/riscv-isa-sim/configure.ac @@ -0,0 +1,126 @@ +#========================================================================= +# Toplevel configure.ac for the Modular C++ Build System +#========================================================================= +# Please read the documenation in 'mcppbs-doc.txt' for more details on +# how the Modular C++ Build System works. For most new projects, a +# developer will only need to make the following changes: +# +# - change the project metadata listed right below +# - update the list of subprojects via the 'MCPPBS_SUBPROJECTS' macro +# - possibly add subproject groups if needed to ease configuration +# - add more configure checks for platform specific configuration +# + +#------------------------------------------------------------------------- +# Project metadata +#------------------------------------------------------------------------- + +m4_define( proj_name, [RISC-V ISA Simulator]) +m4_define( proj_maintainer, [Andrew Waterman]) +m4_define( proj_abbreviation, [spike]) + +#------------------------------------------------------------------------- +# Project version information +#------------------------------------------------------------------------- +# Version information is meant to be managed through a version control +# system's tags and revision numbers. In a working copy the version will +# not be defined here (you should just use the version control system's +# mechanisms). When we make a distribution then we can set the version +# here as formed by the scripts/vcs-version.sh script so that the +# distribution knows what version it came from. If you are not using +# version control then it is fine to set this directly. 
+ +m4_define( proj_version, [?]) + +#------------------------------------------------------------------------- +# Setup +#------------------------------------------------------------------------- + +AC_INIT(proj_name,proj_version,proj_maintainer,proj_abbreviation) +AC_LANG_CPLUSPLUS +AC_CONFIG_SRCDIR([riscv/common.h]) +AC_CONFIG_AUX_DIR([scripts]) +AC_CANONICAL_BUILD +AC_CANONICAL_HOST + +m4_include(ax_require_defined.m4) +m4_include(ax_append_flag.m4) +m4_include(ax_check_compile_flag.m4) +m4_include(ax_check_link_flag.m4) +m4_include(ax_append_link_flags.m4) +m4_include(ax_boost_base.m4) +m4_include(ax_boost_asio.m4) +m4_include(ax_boost_regex.m4) + +#------------------------------------------------------------------------- +# Checks for programs +#------------------------------------------------------------------------- + +AC_PROG_CC +AC_PROG_CXX +AC_CHECK_TOOL([AR],[ar]) +AC_CHECK_TOOL([RANLIB],[ranlib]) +AC_PATH_PROG([DTC],[dtc],[no]) +AS_IF([test x"$DTC" == xno],AC_MSG_ERROR([device-tree-compiler not found])) +AC_DEFINE_UNQUOTED(DTC, ["dtc"], [Executable name of device-tree-compiler]) + +AC_C_BIGENDIAN + +#------------------------------------------------------------------------- +# MCPPBS specific program checks +#------------------------------------------------------------------------- +# These macros check to see if we can do a stow-based install and also +# check for an isa simulator suitable for running the unit test programs +# via the makefile. + +MCPPBS_PROG_INSTALL + +#------------------------------------------------------------------------- +# Checks for header files +#------------------------------------------------------------------------- + +AC_HEADER_STDC + +#------------------------------------------------------------------------- +# Checks for type +#------------------------------------------------------------------------- + +AC_CHECK_TYPE([__int128_t], AC_SUBST([HAVE_INT128],[yes])) + +#------------------------------------------------------------------------- +# Default compiler flags +#------------------------------------------------------------------------- + +AX_APPEND_LINK_FLAGS([-Wl,--export-dynamic]) + +AX_CHECK_COMPILE_FLAG([-relocatable-pch], AC_SUBST([HAVE_CLANG_PCH],[yes])) + +#------------------------------------------------------------------------- +# MCPPBS subproject list +#------------------------------------------------------------------------- +# Order list so that subprojects only depend on those listed earlier. +# The '*' suffix indicates an optional subproject. The '**' suffix +# indicates an optional subproject which is also the name of a group. + +MCPPBS_SUBPROJECTS([ fesvr, riscv, disasm, customext, fdt, softfloat, spike_main, spike_dasm ]) + +#------------------------------------------------------------------------- +# MCPPBS subproject groups +#------------------------------------------------------------------------- +# If a group has the same name as a subproject then you must add the +# '**' suffix in the subproject list above. The list of subprojects in a +# group should be ordered so that subprojets only depend on those listed +# earlier. Here is an example: +# +# MCPPBS_GROUP( [group-name], [sproja,sprojb,...] 
) +# + +#------------------------------------------------------------------------- +# Output +#------------------------------------------------------------------------- + +AC_CONFIG_HEADERS([config.h]) +AC_CONFIG_FILES([Makefile]) +AC_CONFIG_FILES([riscv-fesvr.pc]) +AC_CONFIG_FILES([riscv-disasm.pc]) +AC_OUTPUT diff --git a/vendor/riscv-isa-sim/customext/cflush.cc b/vendor/riscv-isa-sim/customext/cflush.cc new file mode 100644 index 00000000..1a5cfa2d --- /dev/null +++ b/vendor/riscv-isa-sim/customext/cflush.cc @@ -0,0 +1,42 @@ +#include "insn_macros.h" +#include "extension.h" +#include + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rs1()]; + } +} xrs1; + +static reg_t custom_cflush(processor_t* p, insn_t insn, reg_t pc) +{ + require_privilege(PRV_M); + + return pc + 4; \ +} + +class cflush_t : public extension_t +{ + public: + const char* name() { return "cflush"; } + + cflush_t() {} + + std::vector get_instructions() { + std::vector insns; + insns.push_back((insn_desc_t){true, 0xFC000073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){true, 0xFC200073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + insns.push_back((insn_desc_t){true, 0xFC100073, 0xFFF07FFF, custom_cflush, custom_cflush, custom_cflush, custom_cflush}); + return insns; + } + + std::vector get_disasms() { + std::vector insns; + insns.push_back(new disasm_insn_t("cflush.d.l1", 0xFC000073, 0xFFF07FFF, {&xrs1})); + insns.push_back(new disasm_insn_t("cdiscard.d.l1", 0xFC200073, 0xFFF07FFF, {&xrs1})); + insns.push_back(new disasm_insn_t("cflush.i.l1", 0xFC100073, 0xFFF07FFF, {&xrs1})); + return insns; + } +}; + +REGISTER_EXTENSION(cflush, []() { return new cflush_t; }) diff --git a/vendor/riscv-isa-sim/customext/customext.ac b/vendor/riscv-isa-sim/customext/customext.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/customext/customext.mk.in b/vendor/riscv-isa-sim/customext/customext.mk.in new file mode 100644 index 00000000..a14e771c --- /dev/null +++ b/vendor/riscv-isa-sim/customext/customext.mk.in @@ -0,0 +1,11 @@ +customext_subproject_deps = \ + spike_main \ + riscv \ + disasm \ + softfloat \ + +customext_srcs = \ + dummy_rocc.cc \ + cflush.cc \ + +customext_install_shared_lib = yes diff --git a/vendor/riscv-isa-sim/customext/dummy_rocc.cc b/vendor/riscv-isa-sim/customext/dummy_rocc.cc new file mode 100644 index 00000000..85ab7aa6 --- /dev/null +++ b/vendor/riscv-isa-sim/customext/dummy_rocc.cc @@ -0,0 +1,47 @@ +#include "rocc.h" +#include "mmu.h" +#include + +class dummy_rocc_t : public rocc_t +{ + public: + const char* name() { return "dummy_rocc"; } + + reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t xs2) + { + reg_t prev_acc = acc[insn.rs2]; + + if (insn.rs2 >= num_acc) + illegal_instruction(); + + switch (insn.funct) + { + case 0: // acc <- xs1 + acc[insn.rs2] = xs1; + break; + case 1: // xd <- acc (the only real work is the return statement below) + break; + case 2: // acc[rs2] <- Mem[xs1] + acc[insn.rs2] = p->get_mmu()->load_uint64(xs1); + break; + case 3: // acc[rs2] <- accX + xs1 + acc[insn.rs2] += xs1; + break; + default: + illegal_instruction(); + } + + return prev_acc; // in all cases, xd <- previous value of acc[rs2] + } + + dummy_rocc_t() + { + memset(acc, 0, sizeof(acc)); + } + + private: + static const int num_acc = 4; + reg_t acc[num_acc]; +}; + +REGISTER_EXTENSION(dummy_rocc, []() { return new dummy_rocc_t; }) diff --git 
a/vendor/riscv-isa-sim/customext/dummy_rocc_test.c b/vendor/riscv-isa-sim/customext/dummy_rocc_test.c new file mode 100644 index 00000000..94de8c04 --- /dev/null +++ b/vendor/riscv-isa-sim/customext/dummy_rocc_test.c @@ -0,0 +1,29 @@ +// The following is a RISC-V program to test the functionality of the +// dummy RoCC accelerator. +// Compile with riscv64-unknown-elf-gcc dummy_rocc_test.c +// Run with spike --extension=dummy_rocc pk a.out + +#include +#include +#include + +int main() { + uint64_t x = 123, y = 456, z = 0; + // load x into accumulator 2 (funct=0) + asm volatile ("custom0 x0, %0, 2, 0" : : "r"(x)); + // read it back into z (funct=1) to verify it + asm volatile ("custom0 %0, x0, 2, 1" : "=r"(z)); + assert(z == x); + // accumulate 456 into it (funct=3) + asm volatile ("custom0 x0, %0, 2, 3" : : "r"(y)); + // verify it + asm volatile ("custom0 %0, x0, 2, 1" : "=r"(z)); + assert(z == x+y); + // do it all again, but initialize acc2 via memory this time (funct=2) + asm volatile ("custom0 x0, %0, 2, 2" : : "r"(&x)); + asm volatile ("custom0 x0, %0, 2, 3" : : "r"(y)); + asm volatile ("custom0 %0, x0, 2, 1" : "=r"(z)); + assert(z == x+y); + + printf("success!\n"); +} diff --git a/vendor/riscv-isa-sim/debug_rom/.gitignore b/vendor/riscv-isa-sim/debug_rom/.gitignore new file mode 100644 index 00000000..98bd13e4 --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/.gitignore @@ -0,0 +1,5 @@ +/debug_rom +/debug_rom32 +/debug_rom64 +/debug_rom32.h +/debug_rom64.h diff --git a/vendor/riscv-isa-sim/debug_rom/Makefile b/vendor/riscv-isa-sim/debug_rom/Makefile new file mode 100644 index 00000000..c5f2205d --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/Makefile @@ -0,0 +1,24 @@ +# Recursive make is bad, but in this case we're cross compiling which is a +# pretty unusual use case. + +CC = $(RISCV)/bin/riscv64-unknown-elf-gcc +OBJCOPY = $(RISCV)/bin/riscv64-unknown-elf-objcopy + +COMPILE = $(CC) -nostdlib -nostartfiles -I.. -Tlink.ld + +ELFS = debug_rom +DEPS = debug_rom.S link.ld ../riscv/debug_rom_defines.h ../riscv/encoding.h + +all: $(patsubst %,%.h,$(ELFS)) + +%.h: %.raw + xxd -i $^ | sed "s/^unsigned/static const unsigned/" > $@ + +%.raw: % + $(OBJCOPY) -O binary --only-section .text $^ $@ + +debug_rom: $(DEPS) + $(COMPILE) -o $@ $^ + +clean: + rm -f $(ELFS) debug_rom*.raw debug_rom.h diff --git a/vendor/riscv-isa-sim/debug_rom/debug_rom.S b/vendor/riscv-isa-sim/debug_rom/debug_rom.S new file mode 100755 index 00000000..8d8e4cd0 --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/debug_rom.S @@ -0,0 +1,79 @@ +// See LICENSE.SiFive for license details. + +#include "riscv/encoding.h" +#include "riscv/debug_rom_defines.h" + + .option norvc + .global entry + .global exception + + // Entry location on ebreak, Halt, or Breakpoint + // It is the same for all harts. They branch when + // their GO or RESUME bit is set. + +entry: + jal zero, _entry +resume: + // Not used. + jal zero, _resume +exception: + jal zero, _exception + +_entry: + // This fence is required because the execution may have written something + // into the Abstract Data or Program Buffer registers. + fence + csrw CSR_DSCRATCH, s0 // Save s0 to allow signaling MHARTID + + // We continue to let the hart know that we are halted in order that + // a DM which was reset is still made aware that a hart is halted. + // We keep checking both whether there is something the debugger wants + // us to do, or whether we should resume. 
+entry_loop: + csrr s0, CSR_MHARTID + sw s0, DEBUG_ROM_HALTED(zero) + lbu s0, DEBUG_ROM_FLAGS(s0) // 1 byte flag per hart. Only one hart advances here. + andi s0, s0, (1 << DEBUG_ROM_FLAG_GO) + bnez s0, going + csrr s0, CSR_MHARTID + lbu s0, DEBUG_ROM_FLAGS(s0) // multiple harts can resume here + andi s0, s0, (1 << DEBUG_ROM_FLAG_RESUME) + bnez s0, _resume + wfi + jal zero, entry_loop + +_exception: + // Restore S0, which we always save to dscratch. + // We need this in case the user tried an abstract write to a + // non-existent CSR. + csrr s0, CSR_DSCRATCH + sw zero, DEBUG_ROM_EXCEPTION(zero) // Let debug module know you got an exception. + ebreak + +going: + csrr s0, CSR_MHARTID + sw s0, DEBUG_ROM_GOING(zero) // When debug module sees this write, the GO flag is reset. + csrr s0, CSR_DSCRATCH // Restore s0 here + fence + fence.i + jalr zero, zero, %lo(whereto) // Debug module will put different instructions and data in the RAM, + // so we use fence and fence.i for safety. (rocket-chip doesn't have this + // because jalr is special there) + +_resume: + csrr s0, CSR_MHARTID + sw s0, DEBUG_ROM_RESUMING(zero) // When Debug Module sees this write, the RESUME flag is reset. + csrr s0, CSR_DSCRATCH // Restore s0 + dret + + // END OF ACTUAL "ROM" CONTENTS. BELOW IS JUST FOR LINKER SCRIPT. + +.section .whereto +whereto: + nop + // Variable "ROM" This is : jal x0 abstract, jal x0 program_buffer, + // or jal x0 resume, as desired. + // Debug Module state machine tracks what is 'desired'. + // We don't need/want to use jalr here because all of the + // Variable ROM contents are set by + // Debug Module before setting the OK_GO byte. diff --git a/vendor/riscv-isa-sim/debug_rom/debug_rom.h b/vendor/riscv-isa-sim/debug_rom/debug_rom.h new file mode 100644 index 00000000..7edd5f68 --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/debug_rom.h @@ -0,0 +1,13 @@ +static const unsigned char debug_rom_raw[] = { + 0x6f, 0x00, 0xc0, 0x00, 0x6f, 0x00, 0x00, 0x06, 0x6f, 0x00, 0x80, 0x03, + 0x0f, 0x00, 0xf0, 0x0f, 0x73, 0x10, 0x24, 0x7b, 0x73, 0x24, 0x40, 0xf1, + 0x23, 0x20, 0x80, 0x10, 0x03, 0x44, 0x04, 0x40, 0x13, 0x74, 0x14, 0x00, + 0x63, 0x14, 0x04, 0x02, 0x73, 0x24, 0x40, 0xf1, 0x03, 0x44, 0x04, 0x40, + 0x13, 0x74, 0x24, 0x00, 0x63, 0x18, 0x04, 0x02, 0x73, 0x00, 0x50, 0x10, + 0x6f, 0xf0, 0x9f, 0xfd, 0x73, 0x24, 0x20, 0x7b, 0x23, 0x26, 0x00, 0x10, + 0x73, 0x00, 0x10, 0x00, 0x73, 0x24, 0x40, 0xf1, 0x23, 0x22, 0x80, 0x10, + 0x73, 0x24, 0x20, 0x7b, 0x0f, 0x00, 0xf0, 0x0f, 0x0f, 0x10, 0x00, 0x00, + 0x67, 0x00, 0x00, 0x30, 0x73, 0x24, 0x40, 0xf1, 0x23, 0x24, 0x80, 0x10, + 0x73, 0x24, 0x20, 0x7b, 0x73, 0x00, 0x20, 0x7b +}; +static const unsigned int debug_rom_raw_len = 116; diff --git a/vendor/riscv-isa-sim/debug_rom/link.ld b/vendor/riscv-isa-sim/debug_rom/link.ld new file mode 100644 index 00000000..897c42da --- /dev/null +++ b/vendor/riscv-isa-sim/debug_rom/link.ld @@ -0,0 +1,15 @@ +OUTPUT_ARCH( "riscv" ) +ENTRY( entry ) +SECTIONS +{ + .whereto 0x300 : + { + *(.whereto) + } + . = 0x800; + .text : + { + *(.text) + } + _end = .; +} diff --git a/vendor/riscv-isa-sim/disasm/disasm.ac b/vendor/riscv-isa-sim/disasm/disasm.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/disasm/disasm.cc b/vendor/riscv-isa-sim/disasm/disasm.cc new file mode 100644 index 00000000..d18f0892 --- /dev/null +++ b/vendor/riscv-isa-sim/disasm/disasm.cc @@ -0,0 +1,2147 @@ +// See LICENSE for license details. 
+ +#include "disasm.h" +#include +#include +#include +#include +#include +#include + +// Indicates that the next arg (only) is optional. +// If the result of converting the next arg to a string is "" +// then it will not be printed. +static const arg_t* opt = nullptr; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.i_imm()) + '(' + xpr_name[insn.rs1()] + ')'; + } +} load_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.s_imm()) + '(' + xpr_name[insn.rs1()] + ')'; + } +} store_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::string("(") + xpr_name[insn.rs1()] + ')'; + } +} base_only_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rd()]; + } +} xrd; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rs1()]; + } +} xrs1; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rs2()]; + } +} xrs2; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rs3()]; + } +} xrs3; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return fpr_name[insn.rd()]; + } +} frd; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return fpr_name[insn.rs1()]; + } +} frs1; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return fpr_name[insn.rs2()]; + } +} frs2; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return fpr_name[insn.rs3()]; + } +} frs3; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + switch (insn.csr()) + { + #define DECLARE_CSR(name, num) case num: return #name; + #include "encoding.h" + #undef DECLARE_CSR + default: + { + char buf[16]; + snprintf(buf, sizeof buf, "unknown_%03" PRIx64, insn.csr()); + return std::string(buf); + } + } + } +} csr; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.i_imm()); + } +} imm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.shamt()); + } +} shamt; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + std::stringstream s; + s << std::hex << "0x" << ((uint32_t)insn.u_imm() >> 12); + return s.str(); + } +} bigimm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string(insn.rs1()); + } +} zimm5; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + int32_t target = insn.sb_imm(); + std::string s = target >= 0 ? "pc + " : "pc - "; + s += std::to_string(abs(target)); + return s; + } +} branch_target; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + std::stringstream s; + int32_t target = insn.uj_imm(); + char sign = target >= 0 ? 
'+' : '-'; + s << "pc " << sign << std::hex << " 0x" << abs(target); + return s.str(); + } +} jump_target; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rvc_rs1()]; + } +} rvc_rs1; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rvc_rs2()]; + } +} rvc_rs2; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return fpr_name[insn.rvc_rs2()]; + } +} rvc_fp_rs2; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rvc_rs1s()]; + } +} rvc_rs1s; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[insn.rvc_rs2s()]; + } +} rvc_rs2s; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return fpr_name[insn.rvc_rs2s()]; + } +} rvc_fp_rs2s; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return xpr_name[X_SP]; + } +} rvc_sp; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_imm()); + } +} rvc_imm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_addi4spn_imm()); + } +} rvc_addi4spn_imm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_addi16sp_imm()); + } +} rvc_addi16sp_imm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_lwsp_imm()); + } +} rvc_lwsp_imm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)(insn.rvc_imm() & 0x3f)); + } +} rvc_shamt; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + std::stringstream s; + s << std::hex << "0x" << ((uint32_t)insn.rvc_imm() << 12 >> 12); + return s.str(); + } +} rvc_uimm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_lwsp_imm()) + '(' + xpr_name[X_SP] + ')'; + } +} rvc_lwsp_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_ldsp_imm()) + '(' + xpr_name[X_SP] + ')'; + } +} rvc_ldsp_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_swsp_imm()) + '(' + xpr_name[X_SP] + ')'; + } +} rvc_swsp_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_sdsp_imm()) + '(' + xpr_name[X_SP] + ')'; + } +} rvc_sdsp_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_lw_imm()) + '(' + xpr_name[insn.rvc_rs1s()] + ')'; + } +} rvc_lw_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rvc_ld_imm()) + '(' + xpr_name[insn.rvc_rs1s()] + ')'; + } +} rvc_ld_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + int32_t target = insn.rvc_b_imm(); + std::string s = target >= 0 ? "pc + " : "pc - "; + s += std::to_string(abs(target)); + return s; + } +} rvc_branch_target; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + int32_t target = insn.rvc_j_imm(); + std::string s = target >= 0 ? 
"pc + " : "pc - "; + s += std::to_string(abs(target)); + return s; + } +} rvc_jump_target; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::string("(") + xpr_name[insn.rs1()] + ')'; + } +} v_address; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return vr_name[insn.rd()]; + } +} vd; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return vr_name[insn.rs1()]; + } +} vs1; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return vr_name[insn.rs2()]; + } +} vs2; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return vr_name[insn.rd()]; + } +} vs3; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return insn.v_vm() ? "" : "v0.t"; + } +} vm; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return "v0"; + } +} v0; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.v_simm5()); + } +} v_simm5; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + std::stringstream s; + int sew = insn.v_sew(); + int lmul = insn.v_lmul(); + auto vta = insn.v_vta() == 1 ? "ta" : "tu"; + auto vma = insn.v_vma() == 1 ? "ma" : "mu"; + s << "e" << sew; + if(insn.v_frac_lmul()) { + std::string lmul_str = ""; + switch(lmul){ + case 3: + lmul_str = "f2"; + break; + case 2: + lmul_str = "f4"; + break; + case 1: + lmul_str = "f8"; + break; + default: + assert(true && "unsupport fractional LMUL"); + } + s << ", m" << lmul_str; + } else { + s << ", m" << (1 << lmul); + } + s << ", " << vta << ", " << vma; + return s.str(); + } +} v_vtype; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return "x0"; + } +} x0; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + std::string s; + auto iorw = insn.iorw(); + bool has_pre = false; + static const char type[] = "wroi"; + for (int i = 7; i >= 4; --i) { + if (iorw & (1ul << i)) { + s += type[i - 4]; + has_pre = true; + } + } + + s += (has_pre ? "," : ""); + for (int i = 3; i >= 0; --i) { + if (iorw & (1ul << i)) { + s += type[i]; + } + } + + return s; + } +} iorw; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.p_imm2()); + } +} p_imm2; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.p_imm3()); + } +} p_imm3; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.p_imm4()); + } +} p_imm4; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.p_imm5()); + } +} p_imm5; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.p_imm6()); + } +} p_imm6; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.bs()); + } +} bs; + +struct : public arg_t { + std::string to_string(insn_t insn) const { + return std::to_string((int)insn.rcon()); + } +} rcon; + +typedef struct { + reg_t match; + reg_t mask; + const char *fmt; + std::vector& arg; +} custom_fmt_t; + +std::string disassembler_t::disassemble(insn_t insn) const +{ + const disasm_insn_t* disasm_insn = lookup(insn); + return disasm_insn ? 
disasm_insn->to_string(insn) : "unknown"; +} + +static void NOINLINE add_noarg_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {})); +} + +static void NOINLINE add_rtype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &xrs2})); +} + +static void NOINLINE add_r1type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1})); +} + +static void NOINLINE add_r3type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &xrs2, &xrs3})); +} + +static void NOINLINE add_itype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &imm})); +} + +static void NOINLINE add_itype_shift_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &shamt})); +} + +static void NOINLINE add_xload_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &load_address})); +} + +static void NOINLINE add_xstore_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs2, &store_address})); +} + +static void NOINLINE add_fload_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &load_address})); +} + +static void NOINLINE add_fstore_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frs2, &store_address})); +} + +static void NOINLINE add_xamo_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs2, &base_only_address})); +} + +static void NOINLINE add_xlr_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &base_only_address})); +} + +static void NOINLINE add_xst_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs2, &base_only_address})); +} + +static void NOINLINE add_btype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs1, &xrs2, &branch_target})); +} + +static void NOINLINE add_b1type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + const uint32_t mask_rs2 = 0x1fUL << 20; + d->add_insn(new disasm_insn_t(name, match, mask | mask_rs2, {&xrs1, &branch_target})); +} + +static void NOINLINE add_frtype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &frs1, &frs2})); +} + +static void NOINLINE add_fr1type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &frs1})); +} + +static void NOINLINE add_fr3type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &frs1, &frs2, &frs3})); +} + +static void NOINLINE 
add_fxtype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &frs1})); +} + +static void NOINLINE add_xftype_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&frd, &xrs1})); +} + +static void NOINLINE add_fx2type_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &frs1, &frs2})); +} + +static void NOINLINE add_sfence_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrs1, &xrs2})); +} + +static void NOINLINE add_pitype3_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm3})); +} + +static void NOINLINE add_pitype4_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm4})); +} + +static void NOINLINE add_pitype5_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm5})); +} + +static void NOINLINE add_pitype6_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&xrd, &xrs1, &p_imm6})); +} + +static void NOINLINE add_vector_v_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, opt, &vm})); +} + +static void NOINLINE add_vector_vv_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &vs1, opt, &vm})); +} + +static void NOINLINE add_vector_vx_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &xrs1, opt, &vm})); +} + +static void NOINLINE add_vector_vf_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &frs1, opt, &vm})); +} + +static void NOINLINE add_vector_vi_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &v_simm5, opt, &vm})); +} + +static void NOINLINE add_vector_viu_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &zimm5, opt, &vm})); +} + +static void NOINLINE add_vector_vvm_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &vs1, &v0})); +} + +static void NOINLINE add_vector_vxm_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &xrs1, &v0})); +} + +static void NOINLINE add_vector_vim_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + d->add_insn(new disasm_insn_t(name, match, mask, {&vd, &vs2, &v_simm5, &v0})); +} + +static void NOINLINE add_unknown_insn(disassembler_t* d, const char* name, uint32_t match, uint32_t mask) +{ + std::string s = name; + s += " (args unknown)"; + + d->add_insn(new disasm_insn_t(s.c_str(), match, mask, {})); +} + + +static void NOINLINE 
add_unknown_insns(disassembler_t* d) +{ + // provide a default disassembly for all instructions as a fallback + #define DECLARE_INSN(code, match, mask) \ + add_unknown_insn(d, #code, match, mask); + #include "encoding.h" + #undef DECLARE_INSN +} + +void disassembler_t::add_instructions(const isa_parser_t* isa) +{ + const uint32_t mask_rd = 0x1fUL << 7; + const uint32_t match_rd_ra = 1UL << 7; + const uint32_t mask_rs1 = 0x1fUL << 15; + const uint32_t match_rs1_ra = 1UL << 15; + const uint32_t mask_rs2 = 0x1fUL << 20; + const uint32_t mask_imm = 0xfffUL << 20; + const uint32_t imm_shift = 20; + const uint32_t mask_rvc_rs2 = 0x1fUL << 2; + const uint32_t mask_rvc_imm = mask_rvc_rs2 | 0x1000UL; + const uint32_t mask_nf = 0x7Ul << 29; + const uint32_t mask_wd = 0x1Ul << 26; + const uint32_t mask_vm = 0x1Ul << 25; + const uint32_t mask_vldst = 0x7Ul << 12 | 0x1UL << 28; + const uint32_t mask_amoop = 0x1fUl << 27; + const uint32_t mask_width = 0x7Ul << 12; + + #define DECLARE_INSN(code, match, mask) \ + const uint32_t match_##code = match; \ + const uint32_t mask_##code = mask; + #include "encoding.h" + #undef DECLARE_INSN + + // explicit per-instruction disassembly + #define DISASM_INSN(name, code, extra, ...) \ + add_insn(new disasm_insn_t(name, match_##code, mask_##code | (extra), __VA_ARGS__)); + #define DEFINE_NOARG(code) add_noarg_insn(this, #code, match_##code, mask_##code); + #define DEFINE_RTYPE(code) add_rtype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_R1TYPE(code) add_r1type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_R3TYPE(code) add_r3type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_ITYPE(code) add_itype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_ITYPE_SHIFT(code) add_itype_shift_insn(this, #code, match_##code, mask_##code); + #define DEFINE_I0TYPE(name, code) DISASM_INSN(name, code, mask_rs1, {&xrd, &imm}) + #define DEFINE_I1TYPE(name, code) DISASM_INSN(name, code, mask_imm, {&xrd, &xrs1}) + #define DEFINE_I2TYPE(name, code) DISASM_INSN(name, code, mask_rd | mask_imm, {&xrs1}) + #define DEFINE_LTYPE(code) DISASM_INSN(#code, code, 0, {&xrd, &bigimm}) + #define DEFINE_BTYPE(code) add_btype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_B1TYPE(name, code) add_b1type_insn(this, name, match_##code, mask_##code); + #define DEFINE_XLOAD(code) add_xload_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XSTORE(code) add_xstore_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XAMO(code) add_xamo_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XLOAD_BASE(code) add_xlr_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XSTORE_BASE(code) add_xst_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FLOAD(code) add_fload_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FSTORE(code) add_fstore_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FRTYPE(code) add_frtype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FR1TYPE(code) add_fr1type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FR3TYPE(code) add_fr3type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FXTYPE(code) add_fxtype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_FX2TYPE(code) add_fx2type_insn(this, #code, match_##code, mask_##code); + #define DEFINE_XFTYPE(code) add_xftype_insn(this, #code, match_##code, mask_##code); + #define DEFINE_SFENCE_TYPE(code) add_sfence_insn(this, #code, 
match_##code, mask_##code); + + add_insn(new disasm_insn_t("unimp", match_csrrw|(CSR_CYCLE<<20), 0xffffffff, {})); + add_insn(new disasm_insn_t("c.unimp", 0, 0xffff, {})); + + DEFINE_XLOAD(lb) + DEFINE_XLOAD(lbu) + DEFINE_XLOAD(lh) + DEFINE_XLOAD(lhu) + DEFINE_XLOAD(lw) + DEFINE_XLOAD(lwu) + DEFINE_XLOAD(ld) + + DEFINE_XSTORE(sb) + DEFINE_XSTORE(sh) + DEFINE_XSTORE(sw) + DEFINE_XSTORE(sd) + + if (isa->extension_enabled('A')) { + DEFINE_XAMO(amoadd_w) + DEFINE_XAMO(amoswap_w) + DEFINE_XAMO(amoand_w) + DEFINE_XAMO(amoor_w) + DEFINE_XAMO(amoxor_w) + DEFINE_XAMO(amomin_w) + DEFINE_XAMO(amomax_w) + DEFINE_XAMO(amominu_w) + DEFINE_XAMO(amomaxu_w) + DEFINE_XAMO(amoadd_d) + DEFINE_XAMO(amoswap_d) + DEFINE_XAMO(amoand_d) + DEFINE_XAMO(amoor_d) + DEFINE_XAMO(amoxor_d) + DEFINE_XAMO(amomin_d) + DEFINE_XAMO(amomax_d) + DEFINE_XAMO(amominu_d) + DEFINE_XAMO(amomaxu_d) + DEFINE_XLOAD_BASE(lr_w) + DEFINE_XAMO(sc_w) + DEFINE_XLOAD_BASE(lr_d) + DEFINE_XAMO(sc_d) + } + + add_insn(new disasm_insn_t("j", match_jal, mask_jal | mask_rd, {&jump_target})); + add_insn(new disasm_insn_t("jal", match_jal | match_rd_ra, mask_jal | mask_rd, {&jump_target})); + add_insn(new disasm_insn_t("jal", match_jal, mask_jal, {&xrd, &jump_target})); + + DEFINE_B1TYPE("beqz", beq); + DEFINE_B1TYPE("bnez", bne); + DEFINE_B1TYPE("bltz", blt); + DEFINE_B1TYPE("bgez", bge); + DEFINE_BTYPE(beq) + DEFINE_BTYPE(bne) + DEFINE_BTYPE(blt) + DEFINE_BTYPE(bge) + DEFINE_BTYPE(bltu) + DEFINE_BTYPE(bgeu) + + DEFINE_LTYPE(lui); + DEFINE_LTYPE(auipc); + + add_insn(new disasm_insn_t("ret", match_jalr | match_rs1_ra, mask_jalr | mask_rd | mask_rs1 | mask_imm, {})); + DEFINE_I2TYPE("jr", jalr); + add_insn(new disasm_insn_t("jalr", match_jalr | match_rd_ra, mask_jalr | mask_rd | mask_imm, {&xrs1})); + DEFINE_ITYPE(jalr); + + add_noarg_insn(this, "nop", match_addi, mask_addi | mask_rd | mask_rs1 | mask_imm); + DEFINE_I0TYPE("li", addi); + DEFINE_I1TYPE("mv", addi); + DEFINE_ITYPE(addi); + DEFINE_ITYPE(slti); + add_insn(new disasm_insn_t("seqz", match_sltiu | (1 << imm_shift), mask_sltiu | mask_imm, {&xrd, &xrs1})); + DEFINE_ITYPE(sltiu); + add_insn(new disasm_insn_t("not", match_xori | mask_imm, mask_xori | mask_imm, {&xrd, &xrs1})); + DEFINE_ITYPE(xori); + + DEFINE_ITYPE_SHIFT(slli); + DEFINE_ITYPE_SHIFT(srli); + DEFINE_ITYPE_SHIFT(srai); + + DEFINE_ITYPE(ori); + DEFINE_ITYPE(andi); + DEFINE_I1TYPE("sext.w", addiw); + DEFINE_ITYPE(addiw); + + DEFINE_ITYPE_SHIFT(slliw); + DEFINE_ITYPE_SHIFT(srliw); + DEFINE_ITYPE_SHIFT(sraiw); + + DEFINE_RTYPE(add); + DEFINE_RTYPE(sub); + DEFINE_RTYPE(sll); + DEFINE_RTYPE(slt); + add_insn(new disasm_insn_t("snez", match_sltu, mask_sltu | mask_rs1, {&xrd, &xrs2})); + DEFINE_RTYPE(sltu); + DEFINE_RTYPE(xor); + DEFINE_RTYPE(srl); + DEFINE_RTYPE(sra); + DEFINE_RTYPE(or); + DEFINE_RTYPE(and); + DEFINE_RTYPE(addw); + DEFINE_RTYPE(subw); + DEFINE_RTYPE(sllw); + DEFINE_RTYPE(srlw); + DEFINE_RTYPE(sraw); + + DEFINE_NOARG(ecall); + DEFINE_NOARG(ebreak); + DEFINE_NOARG(mret); + DEFINE_NOARG(dret); + DEFINE_NOARG(wfi); + add_insn(new disasm_insn_t("fence", match_fence, mask_fence, {&iorw})); + DEFINE_NOARG(fence_i); + + add_insn(new disasm_insn_t("csrr", match_csrrs, mask_csrrs | mask_rs1, {&xrd, &csr})); + add_insn(new disasm_insn_t("csrw", match_csrrw, mask_csrrw | mask_rd, {&csr, &xrs1})); + add_insn(new disasm_insn_t("csrs", match_csrrs, mask_csrrs | mask_rd, {&csr, &xrs1})); + add_insn(new disasm_insn_t("csrc", match_csrrc, mask_csrrc | mask_rd, {&csr, &xrs1})); + add_insn(new disasm_insn_t("csrwi", match_csrrwi, mask_csrrwi | 
mask_rd, {&csr, &zimm5})); + add_insn(new disasm_insn_t("csrsi", match_csrrsi, mask_csrrsi | mask_rd, {&csr, &zimm5})); + add_insn(new disasm_insn_t("csrci", match_csrrci, mask_csrrci | mask_rd, {&csr, &zimm5})); + add_insn(new disasm_insn_t("csrrw", match_csrrw, mask_csrrw, {&xrd, &csr, &xrs1})); + add_insn(new disasm_insn_t("csrrs", match_csrrs, mask_csrrs, {&xrd, &csr, &xrs1})); + add_insn(new disasm_insn_t("csrrc", match_csrrc, mask_csrrc, {&xrd, &csr, &xrs1})); + add_insn(new disasm_insn_t("csrrwi", match_csrrwi, mask_csrrwi, {&xrd, &csr, &zimm5})); + add_insn(new disasm_insn_t("csrrsi", match_csrrsi, mask_csrrsi, {&xrd, &csr, &zimm5})); + add_insn(new disasm_insn_t("csrrci", match_csrrci, mask_csrrci, {&xrd, &csr, &zimm5})); + + if (isa->extension_enabled('S')) { + DEFINE_NOARG(sret); + DEFINE_SFENCE_TYPE(sfence_vma); + } + + if (isa->extension_enabled('M')) { + DEFINE_RTYPE(mul); + DEFINE_RTYPE(mulh); + DEFINE_RTYPE(mulhu); + DEFINE_RTYPE(mulhsu); + DEFINE_RTYPE(mulw); + DEFINE_RTYPE(div); + DEFINE_RTYPE(divu); + DEFINE_RTYPE(rem); + DEFINE_RTYPE(remu); + DEFINE_RTYPE(divw); + DEFINE_RTYPE(divuw); + DEFINE_RTYPE(remw); + DEFINE_RTYPE(remuw); + } + + if (isa->extension_enabled(EXT_ZBA)) { + DEFINE_RTYPE(sh1add); + DEFINE_RTYPE(sh2add); + DEFINE_RTYPE(sh3add); + if (isa->get_max_xlen() == 64) { + DEFINE_ITYPE_SHIFT(slli_uw); + add_insn(new disasm_insn_t("zext.w", match_add_uw, mask_add_uw | mask_rs2, {&xrd, &xrs1})); + DEFINE_RTYPE(add_uw); + DEFINE_RTYPE(sh1add_uw); + DEFINE_RTYPE(sh2add_uw); + DEFINE_RTYPE(sh3add_uw); + } + } + + if (isa->extension_enabled(EXT_ZBB)) { + DEFINE_RTYPE(ror); + DEFINE_RTYPE(rol); + DEFINE_ITYPE_SHIFT(rori); + DEFINE_R1TYPE(ctz); + DEFINE_R1TYPE(clz); + DEFINE_R1TYPE(cpop); + DEFINE_RTYPE(min); + DEFINE_RTYPE(minu); + DEFINE_RTYPE(max); + DEFINE_RTYPE(maxu); + DEFINE_RTYPE(andn); + DEFINE_RTYPE(orn); + DEFINE_RTYPE(xnor); + DEFINE_R1TYPE(sext_b); + DEFINE_R1TYPE(sext_h); + add_insn(new disasm_insn_t("rev8", match_grevi | ((isa->get_max_xlen() - 8) << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + add_insn(new disasm_insn_t("orc.b", match_gorci | (0x7 << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + add_insn(new disasm_insn_t("zext.h", (isa->get_max_xlen() == 32 ? 
match_pack : match_packw), mask_pack | mask_rs2, {&xrd, &xrs1})); + if (isa->get_max_xlen() == 64) { + DEFINE_RTYPE(rorw); + DEFINE_RTYPE(rolw); + DEFINE_ITYPE_SHIFT(roriw); + DEFINE_R1TYPE(ctzw); + DEFINE_R1TYPE(clzw); + DEFINE_R1TYPE(cpopw); + } + } + + if (isa->extension_enabled(EXT_ZBS)) { + DEFINE_RTYPE(bclr); + DEFINE_RTYPE(binv); + DEFINE_RTYPE(bset); + DEFINE_RTYPE(bext); + DEFINE_ITYPE_SHIFT(bclri); + DEFINE_ITYPE_SHIFT(binvi); + DEFINE_ITYPE_SHIFT(bseti); + DEFINE_ITYPE_SHIFT(bexti); + } + + if (isa->extension_enabled(EXT_ZBKB)) { + add_insn(new disasm_insn_t("brev8", match_grevi | (0x7 << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); // brev8 + add_insn(new disasm_insn_t("rev8", match_grevi | ((isa->get_max_xlen() - 8) << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + DEFINE_RTYPE(pack); + DEFINE_RTYPE(packh); + if (isa->get_max_xlen() == 64) { + DEFINE_RTYPE(packw); + } + } + + if (isa->extension_enabled(EXT_SVINVAL)) { + DEFINE_NOARG(sfence_w_inval); + DEFINE_NOARG(sfence_inval_ir); + DEFINE_SFENCE_TYPE(sinval_vma); + DEFINE_SFENCE_TYPE(hinval_vvma); + DEFINE_SFENCE_TYPE(hinval_gvma); + } + + if (isa->extension_enabled('F')) { + DEFINE_FLOAD(flw) + DEFINE_FSTORE(fsw) + DEFINE_FRTYPE(fadd_s); + DEFINE_FRTYPE(fsub_s); + DEFINE_FRTYPE(fmul_s); + DEFINE_FRTYPE(fdiv_s); + DEFINE_FR1TYPE(fsqrt_s); + DEFINE_FRTYPE(fmin_s); + DEFINE_FRTYPE(fmax_s); + DEFINE_FR3TYPE(fmadd_s); + DEFINE_FR3TYPE(fmsub_s); + DEFINE_FR3TYPE(fnmadd_s); + DEFINE_FR3TYPE(fnmsub_s); + DEFINE_FRTYPE(fsgnj_s); + DEFINE_FRTYPE(fsgnjn_s); + DEFINE_FRTYPE(fsgnjx_s); + DEFINE_FR1TYPE(fcvt_s_d); + DEFINE_FR1TYPE(fcvt_s_q); + DEFINE_XFTYPE(fcvt_s_l); + DEFINE_XFTYPE(fcvt_s_lu); + DEFINE_XFTYPE(fcvt_s_w); + DEFINE_XFTYPE(fcvt_s_wu); + DEFINE_XFTYPE(fcvt_s_wu); + DEFINE_XFTYPE(fmv_w_x); + DEFINE_FXTYPE(fcvt_l_s); + DEFINE_FXTYPE(fcvt_lu_s); + DEFINE_FXTYPE(fcvt_w_s); + DEFINE_FXTYPE(fcvt_wu_s); + DEFINE_FXTYPE(fclass_s); + DEFINE_FXTYPE(fmv_x_w); + DEFINE_FX2TYPE(feq_s); + DEFINE_FX2TYPE(flt_s); + DEFINE_FX2TYPE(fle_s); + } + + if (isa->extension_enabled(EXT_ZFINX)) { + DEFINE_RTYPE(fadd_s); + DEFINE_RTYPE(fsub_s); + DEFINE_RTYPE(fmul_s); + DEFINE_RTYPE(fdiv_s); + DEFINE_R1TYPE(fsqrt_s); + DEFINE_RTYPE(fmin_s); + DEFINE_RTYPE(fmax_s); + DEFINE_R3TYPE(fmadd_s); + DEFINE_R3TYPE(fmsub_s); + DEFINE_R3TYPE(fnmadd_s); + DEFINE_R3TYPE(fnmsub_s); + DEFINE_RTYPE(fsgnj_s); + DEFINE_RTYPE(fsgnjn_s); + DEFINE_RTYPE(fsgnjx_s); + DEFINE_R1TYPE(fcvt_s_d); + //DEFINE_R1TYPE(fcvt_s_q); + DEFINE_R1TYPE(fcvt_s_l); + DEFINE_R1TYPE(fcvt_s_lu); + DEFINE_R1TYPE(fcvt_s_w); + DEFINE_R1TYPE(fcvt_s_wu); + DEFINE_R1TYPE(fcvt_s_wu); + DEFINE_R1TYPE(fcvt_l_s); + DEFINE_R1TYPE(fcvt_lu_s); + DEFINE_R1TYPE(fcvt_w_s); + DEFINE_R1TYPE(fcvt_wu_s); + DEFINE_R1TYPE(fclass_s); + DEFINE_RTYPE(feq_s); + DEFINE_RTYPE(flt_s); + DEFINE_RTYPE(fle_s); + } + + if (isa->extension_enabled('D')) { + DEFINE_FLOAD(fld) + DEFINE_FSTORE(fsd) + DEFINE_FRTYPE(fadd_d); + DEFINE_FRTYPE(fsub_d); + DEFINE_FRTYPE(fmul_d); + DEFINE_FRTYPE(fdiv_d); + DEFINE_FR1TYPE(fsqrt_d); + DEFINE_FRTYPE(fmin_d); + DEFINE_FRTYPE(fmax_d); + DEFINE_FR3TYPE(fmadd_d); + DEFINE_FR3TYPE(fmsub_d); + DEFINE_FR3TYPE(fnmadd_d); + DEFINE_FR3TYPE(fnmsub_d); + DEFINE_FRTYPE(fsgnj_d); + DEFINE_FRTYPE(fsgnjn_d); + DEFINE_FRTYPE(fsgnjx_d); + DEFINE_FR1TYPE(fcvt_d_s); + DEFINE_FR1TYPE(fcvt_d_q); + DEFINE_XFTYPE(fcvt_d_l); + DEFINE_XFTYPE(fcvt_d_lu); + DEFINE_XFTYPE(fcvt_d_w); + DEFINE_XFTYPE(fcvt_d_wu); + DEFINE_XFTYPE(fcvt_d_wu); + DEFINE_XFTYPE(fmv_d_x); + DEFINE_FXTYPE(fcvt_l_d); + 
DEFINE_FXTYPE(fcvt_lu_d); + DEFINE_FXTYPE(fcvt_w_d); + DEFINE_FXTYPE(fcvt_wu_d); + DEFINE_FXTYPE(fclass_d); + DEFINE_FXTYPE(fmv_x_d); + DEFINE_FX2TYPE(feq_d); + DEFINE_FX2TYPE(flt_d); + DEFINE_FX2TYPE(fle_d); + } + + if (isa->extension_enabled(EXT_ZDINX)) { + DEFINE_RTYPE(fadd_d); + DEFINE_RTYPE(fsub_d); + DEFINE_RTYPE(fmul_d); + DEFINE_RTYPE(fdiv_d); + DEFINE_R1TYPE(fsqrt_d); + DEFINE_RTYPE(fmin_d); + DEFINE_RTYPE(fmax_d); + DEFINE_R3TYPE(fmadd_d); + DEFINE_R3TYPE(fmsub_d); + DEFINE_R3TYPE(fnmadd_d); + DEFINE_R3TYPE(fnmsub_d); + DEFINE_RTYPE(fsgnj_d); + DEFINE_RTYPE(fsgnjn_d); + DEFINE_RTYPE(fsgnjx_d); + DEFINE_R1TYPE(fcvt_d_s); + //DEFINE_R1TYPE(fcvt_d_q); + DEFINE_R1TYPE(fcvt_d_l); + DEFINE_R1TYPE(fcvt_d_lu); + DEFINE_R1TYPE(fcvt_d_w); + DEFINE_R1TYPE(fcvt_d_wu); + DEFINE_R1TYPE(fcvt_d_wu); + DEFINE_R1TYPE(fcvt_l_d); + DEFINE_R1TYPE(fcvt_lu_d); + DEFINE_R1TYPE(fcvt_w_d); + DEFINE_R1TYPE(fcvt_wu_d); + DEFINE_R1TYPE(fclass_d); + DEFINE_RTYPE(feq_d); + DEFINE_RTYPE(flt_d); + DEFINE_RTYPE(fle_d); + } + + if (isa->extension_enabled(EXT_ZFH)) { + DEFINE_FRTYPE(fadd_h); + DEFINE_FRTYPE(fsub_h); + DEFINE_FRTYPE(fmul_h); + DEFINE_FRTYPE(fdiv_h); + DEFINE_FR1TYPE(fsqrt_h); + DEFINE_FRTYPE(fmin_h); + DEFINE_FRTYPE(fmax_h); + DEFINE_FR3TYPE(fmadd_h); + DEFINE_FR3TYPE(fmsub_h); + DEFINE_FR3TYPE(fnmadd_h); + DEFINE_FR3TYPE(fnmsub_h); + DEFINE_FRTYPE(fsgnj_h); + DEFINE_FRTYPE(fsgnjn_h); + DEFINE_FRTYPE(fsgnjx_h); + DEFINE_XFTYPE(fcvt_h_l); + DEFINE_XFTYPE(fcvt_h_lu); + DEFINE_XFTYPE(fcvt_h_w); + DEFINE_XFTYPE(fcvt_h_wu); + DEFINE_XFTYPE(fcvt_h_wu); + DEFINE_FXTYPE(fcvt_l_h); + DEFINE_FXTYPE(fcvt_lu_h); + DEFINE_FXTYPE(fcvt_w_h); + DEFINE_FXTYPE(fcvt_wu_h); + DEFINE_FXTYPE(fclass_h); + DEFINE_FX2TYPE(feq_h); + DEFINE_FX2TYPE(flt_h); + DEFINE_FX2TYPE(fle_h); + } + + if (isa->extension_enabled(EXT_ZHINX)) { + DEFINE_RTYPE(fadd_h); + DEFINE_RTYPE(fsub_h); + DEFINE_RTYPE(fmul_h); + DEFINE_RTYPE(fdiv_h); + DEFINE_R1TYPE(fsqrt_h); + DEFINE_RTYPE(fmin_h); + DEFINE_RTYPE(fmax_h); + DEFINE_R3TYPE(fmadd_h); + DEFINE_R3TYPE(fmsub_h); + DEFINE_R3TYPE(fnmadd_h); + DEFINE_R3TYPE(fnmsub_h); + DEFINE_RTYPE(fsgnj_h); + DEFINE_RTYPE(fsgnjn_h); + DEFINE_RTYPE(fsgnjx_h); + DEFINE_R1TYPE(fcvt_h_l); + DEFINE_R1TYPE(fcvt_h_lu); + DEFINE_R1TYPE(fcvt_h_w); + DEFINE_R1TYPE(fcvt_h_wu); + DEFINE_R1TYPE(fcvt_h_wu); + DEFINE_R1TYPE(fcvt_l_h); + DEFINE_R1TYPE(fcvt_lu_h); + DEFINE_R1TYPE(fcvt_w_h); + DEFINE_R1TYPE(fcvt_wu_h); + DEFINE_R1TYPE(fclass_h); + DEFINE_RTYPE(feq_h); + DEFINE_RTYPE(flt_h); + DEFINE_RTYPE(fle_h); + } + + if (isa->extension_enabled(EXT_ZFHMIN)) { + DEFINE_FLOAD(flh) + DEFINE_FSTORE(fsh) + DEFINE_FR1TYPE(fcvt_h_s); + DEFINE_FR1TYPE(fcvt_h_d); + DEFINE_FR1TYPE(fcvt_h_q); + DEFINE_FR1TYPE(fcvt_s_h); + DEFINE_FR1TYPE(fcvt_d_h); + DEFINE_FR1TYPE(fcvt_q_h); + DEFINE_XFTYPE(fmv_h_x); + DEFINE_FXTYPE(fmv_x_h); + } + + if (isa->extension_enabled(EXT_ZHINXMIN)) { + DEFINE_R1TYPE(fcvt_h_s); + DEFINE_R1TYPE(fcvt_h_d); + //DEFINE_R1TYPE(fcvt_h_q); + DEFINE_R1TYPE(fcvt_s_h); + DEFINE_R1TYPE(fcvt_d_h); + //DEFINE_R1TYPE(fcvt_q_h); + } + + if (isa->extension_enabled('Q')) { + DEFINE_FLOAD(flq) + DEFINE_FSTORE(fsq) + DEFINE_FRTYPE(fadd_q); + DEFINE_FRTYPE(fsub_q); + DEFINE_FRTYPE(fmul_q); + DEFINE_FRTYPE(fdiv_q); + DEFINE_FR1TYPE(fsqrt_q); + DEFINE_FRTYPE(fmin_q); + DEFINE_FRTYPE(fmax_q); + DEFINE_FR3TYPE(fmadd_q); + DEFINE_FR3TYPE(fmsub_q); + DEFINE_FR3TYPE(fnmadd_q); + DEFINE_FR3TYPE(fnmsub_q); + DEFINE_FRTYPE(fsgnj_q); + DEFINE_FRTYPE(fsgnjn_q); + DEFINE_FRTYPE(fsgnjx_q); + DEFINE_FR1TYPE(fcvt_q_s); + 
DEFINE_FR1TYPE(fcvt_q_d); + DEFINE_XFTYPE(fcvt_q_l); + DEFINE_XFTYPE(fcvt_q_lu); + DEFINE_XFTYPE(fcvt_q_w); + DEFINE_XFTYPE(fcvt_q_wu); + DEFINE_XFTYPE(fcvt_q_wu); + DEFINE_FXTYPE(fcvt_l_q); + DEFINE_FXTYPE(fcvt_lu_q); + DEFINE_FXTYPE(fcvt_w_q); + DEFINE_FXTYPE(fcvt_wu_q); + DEFINE_FXTYPE(fclass_q); + DEFINE_FX2TYPE(feq_q); + DEFINE_FX2TYPE(flt_q); + DEFINE_FX2TYPE(fle_q); + } + + // ext-h + if (isa->extension_enabled('H')) { + DEFINE_XLOAD_BASE(hlv_b) + DEFINE_XLOAD_BASE(hlv_bu) + DEFINE_XLOAD_BASE(hlv_h) + DEFINE_XLOAD_BASE(hlv_hu) + DEFINE_XLOAD_BASE(hlv_w) + DEFINE_XLOAD_BASE(hlv_wu) + DEFINE_XLOAD_BASE(hlv_d) + + DEFINE_XLOAD_BASE(hlvx_hu) + DEFINE_XLOAD_BASE(hlvx_wu) + + DEFINE_XSTORE_BASE(hsv_b) + DEFINE_XSTORE_BASE(hsv_h) + DEFINE_XSTORE_BASE(hsv_w) + DEFINE_XSTORE_BASE(hsv_d) + + DEFINE_SFENCE_TYPE(hfence_gvma); + DEFINE_SFENCE_TYPE(hfence_vvma); + } + + // ext-c + if (isa->extension_enabled('C')) { + DISASM_INSN("c.ebreak", c_add, mask_rd | mask_rvc_rs2, {}); + add_insn(new disasm_insn_t("ret", match_c_jr | match_rd_ra, mask_c_jr | mask_rd | mask_rvc_imm, {})); + DISASM_INSN("c.jr", c_jr, mask_rvc_imm, {&rvc_rs1}); + DISASM_INSN("c.jalr", c_jalr, mask_rvc_imm, {&rvc_rs1}); + DISASM_INSN("c.nop", c_addi, mask_rd | mask_rvc_imm, {}); + DISASM_INSN("c.addi16sp", c_addi16sp, mask_rd, {&rvc_sp, &rvc_addi16sp_imm}); + DISASM_INSN("c.addi4spn", c_addi4spn, 0, {&rvc_rs2s, &rvc_sp, &rvc_addi4spn_imm}); + DISASM_INSN("c.li", c_li, 0, {&xrd, &rvc_imm}); + DISASM_INSN("c.lui", c_lui, 0, {&xrd, &rvc_uimm}); + DISASM_INSN("c.addi", c_addi, 0, {&xrd, &rvc_imm}); + DISASM_INSN("c.slli", c_slli, 0, {&rvc_rs1, &rvc_shamt}); + DISASM_INSN("c.srli", c_srli, 0, {&rvc_rs1s, &rvc_shamt}); + DISASM_INSN("c.srai", c_srai, 0, {&rvc_rs1s, &rvc_shamt}); + DISASM_INSN("c.andi", c_andi, 0, {&rvc_rs1s, &rvc_imm}); + DISASM_INSN("c.mv", c_mv, 0, {&xrd, &rvc_rs2}); + DISASM_INSN("c.add", c_add, 0, {&xrd, &rvc_rs2}); + DISASM_INSN("c.addw", c_addw, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.sub", c_sub, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.subw", c_subw, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.and", c_and, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.or", c_or, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.xor", c_xor, 0, {&rvc_rs1s, &rvc_rs2s}); + DISASM_INSN("c.lwsp", c_lwsp, 0, {&xrd, &rvc_lwsp_address}); + DISASM_INSN("c.fld", c_fld, 0, {&rvc_fp_rs2s, &rvc_ld_address}); + DISASM_INSN("c.swsp", c_swsp, 0, {&rvc_rs2, &rvc_swsp_address}); + DISASM_INSN("c.lw", c_lw, 0, {&rvc_rs2s, &rvc_lw_address}); + DISASM_INSN("c.sw", c_sw, 0, {&rvc_rs2s, &rvc_lw_address}); + DISASM_INSN("c.beqz", c_beqz, 0, {&rvc_rs1s, &rvc_branch_target}); + DISASM_INSN("c.bnez", c_bnez, 0, {&rvc_rs1s, &rvc_branch_target}); + DISASM_INSN("c.j", c_j, 0, {&rvc_jump_target}); + DISASM_INSN("c.fldsp", c_fldsp, 0, {&frd, &rvc_ldsp_address}); + DISASM_INSN("c.fsd", c_fsd, 0, {&rvc_fp_rs2s, &rvc_ld_address}); + DISASM_INSN("c.fsdsp", c_fsdsp, 0, {&rvc_fp_rs2, &rvc_sdsp_address}); + if (isa->get_max_xlen() == 32) { + DISASM_INSN("c.flw", c_flw, 0, {&rvc_fp_rs2s, &rvc_lw_address}); + DISASM_INSN("c.flwsp", c_flwsp, 0, {&frd, &rvc_lwsp_address}); + DISASM_INSN("c.fsw", c_fsw, 0, {&rvc_fp_rs2s, &rvc_lw_address}); + DISASM_INSN("c.fswsp", c_fswsp, 0, {&rvc_fp_rs2, &rvc_swsp_address}); + DISASM_INSN("c.jal", c_jal, 0, {&rvc_jump_target}); + } else { + DISASM_INSN("c.ld", c_ld, 0, {&rvc_rs2s, &rvc_ld_address}); + DISASM_INSN("c.ldsp", c_ldsp, 0, {&xrd, &rvc_ldsp_address}); + DISASM_INSN("c.sd", c_sd, 0, {&rvc_rs2s, &rvc_ld_address}); + 
DISASM_INSN("c.sdsp", c_sdsp, 0, {&rvc_rs2, &rvc_sdsp_address}); + DISASM_INSN("c.addiw", c_addiw, 0, {&xrd, &rvc_imm}); + } + } + + if (isa->extension_enabled('V')) { + DISASM_INSN("vsetivli", vsetivli, 0, {&xrd, &zimm5, &v_vtype}); + DISASM_INSN("vsetvli", vsetvli, 0, {&xrd, &xrs1, &v_vtype}); + DEFINE_RTYPE(vsetvl); + + std::vector v_ld_unit = {&vd, &v_address, opt, &vm}; + std::vector v_st_unit = {&vs3, &v_address, opt, &vm}; + std::vector v_ld_stride = {&vd, &v_address, &xrs2, opt, &vm}; + std::vector v_st_stride = {&vs3, &v_address, &xrs2, opt, &vm}; + std::vector v_ld_index = {&vd, &v_address, &vs2, opt, &vm}; + std::vector v_st_index = {&vs3, &v_address, &vs2, opt, &vm}; + + add_insn(new disasm_insn_t("vlm.v", match_vlm_v, mask_vlm_v, v_ld_unit)); + add_insn(new disasm_insn_t("vsm.v", match_vsm_v, mask_vsm_v, v_st_unit)); + + // handle vector segment load/store + for (size_t elt = 0; elt <= 7; ++elt) { + const custom_fmt_t template_insn[] = { + {match_vle8_v, mask_vle8_v, "vl%se%d.v", v_ld_unit}, + {match_vse8_v, mask_vse8_v, "vs%se%d.v", v_st_unit}, + + {match_vluxei8_v, mask_vluxei8_v, "vlux%sei%d.v", v_ld_index}, + {match_vsuxei8_v, mask_vsuxei8_v, "vsux%sei%d.v", v_st_index}, + + {match_vlse8_v, mask_vlse8_v, "vls%se%d.v", v_ld_stride}, + {match_vsse8_v, mask_vsse8_v, "vss%se%d.v", v_st_stride}, + + {match_vloxei8_v, mask_vloxei8_v, "vlox%sei%d.v", v_ld_index}, + {match_vsoxei8_v, mask_vsoxei8_v, "vsox%sei%d.v", v_st_index}, + + {match_vle8ff_v, mask_vle8ff_v, "vl%se%dff.v", v_ld_unit} + }; + + reg_t elt_map[] = {0x00000000, 0x00005000, 0x00006000, 0x00007000, + 0x10000000, 0x10005000, 0x10006000, 0x10007000}; + + for (unsigned nf = 0; nf <= 7; ++nf) { + char seg_str[8] = ""; + if (nf) + sprintf(seg_str, "seg%u", nf + 1); + + for (auto item : template_insn) { + const reg_t match_nf = nf << 29; + char buf[128]; + sprintf(buf, item.fmt, seg_str, 8 << elt); + add_insn(new disasm_insn_t( + buf, + ((item.match | match_nf) & ~mask_vldst) | elt_map[elt], + item.mask | mask_nf, + item.arg + )); + } + } + + const custom_fmt_t template_insn2[] = { + {match_vl1re8_v, mask_vl1re8_v, "vl%dre%d.v", v_ld_unit}, + }; + + for (reg_t i = 0, nf = 7; i < 4; i++, nf >>= 1) { + for (auto item : template_insn2) { + const reg_t match_nf = nf << 29; + char buf[128]; + sprintf(buf, item.fmt, nf + 1, 8 << elt); + add_insn(new disasm_insn_t( + buf, + item.match | match_nf | elt_map[elt], + item.mask | mask_nf, + item.arg + )); + } + } + } + + #define DISASM_ST_WHOLE_INSN(name, nf) \ + add_insn(new disasm_insn_t(#name, match_vs1r_v | (nf << 29), \ + mask_vs1r_v | mask_nf, \ + {&vs3, &v_address})); + DISASM_ST_WHOLE_INSN(vs1r.v, 0); + DISASM_ST_WHOLE_INSN(vs2r.v, 1); + DISASM_ST_WHOLE_INSN(vs4r.v, 3); + DISASM_ST_WHOLE_INSN(vs8r.v, 7); + + #undef DISASM_ST_WHOLE_INSN + + #define DEFINE_VECTOR_V(code) add_vector_v_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VV(code) add_vector_vv_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VX(code) add_vector_vx_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VF(code) add_vector_vf_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VI(code) add_vector_vi_insn(this, #code, match_##code, mask_##code) + #define DEFINE_VECTOR_VIU(code) add_vector_viu_insn(this, #code, match_##code, mask_##code) + + #define DISASM_OPIV_VXI_INSN(name, sign, suf) \ + DEFINE_VECTOR_VV(name##_##suf##v); \ + DEFINE_VECTOR_VX(name##_##suf##x); \ + if (sign) \ + DEFINE_VECTOR_VI(name##_##suf##i); \ + else \ + 
DEFINE_VECTOR_VIU(name##_##suf##i) + + #define DISASM_OPIV_VX__INSN(name, sign) \ + DEFINE_VECTOR_VV(name##_vv); \ + DEFINE_VECTOR_VX(name##_vx) + + #define DISASM_OPIV__XI_INSN(name, sign) \ + DEFINE_VECTOR_VX(name##_vx); \ + if (sign) \ + DEFINE_VECTOR_VI(name##_vi); \ + else \ + DEFINE_VECTOR_VIU(name##_vi) + + #define DISASM_OPIV_V___INSN(name, sign) DEFINE_VECTOR_VV(name##_vv) + + #define DISASM_OPIV_S___INSN(name, sign) DEFINE_VECTOR_VV(name##_vs) + + #define DISASM_OPIV_W___INSN(name, sign) \ + DEFINE_VECTOR_VV(name##_wv); \ + DEFINE_VECTOR_VX(name##_wx) + + #define DISASM_OPIV_M___INSN(name, sign) DEFINE_VECTOR_VV(name##_mm) + + #define DISASM_OPIV__X__INSN(name, sign) DEFINE_VECTOR_VX(name##_vx) + + #define DEFINE_VECTOR_VVM(name) \ + add_vector_vvm_insn(this, #name, match_##name, mask_##name | mask_vm) + + #define DEFINE_VECTOR_VXM(name) \ + add_vector_vxm_insn(this, #name, match_##name, mask_##name | mask_vm) + + #define DEFINE_VECTOR_VIM(name) \ + add_vector_vim_insn(this, #name, match_##name, mask_##name | mask_vm) + + #define DISASM_OPIV_VXIM_INSN(name) \ + DEFINE_VECTOR_VVM(name##_vvm); \ + DEFINE_VECTOR_VXM(name##_vxm); \ + DEFINE_VECTOR_VIM(name##_vim) + + #define DISASM_OPIV_VX_M_INSN(name) \ + DEFINE_VECTOR_VVM(name##_vvm); \ + DEFINE_VECTOR_VXM(name##_vxm) + + //OPFVV/OPFVF + //0b00_0000 + DISASM_OPIV_VXI_INSN(vadd, 1, v); + DISASM_OPIV_VX__INSN(vsub, 1); + DISASM_OPIV__XI_INSN(vrsub, 1); + DISASM_OPIV_VX__INSN(vminu, 0); + DISASM_OPIV_VX__INSN(vmin, 1); + DISASM_OPIV_VX__INSN(vmaxu, 1); + DISASM_OPIV_VX__INSN(vmax, 0); + DISASM_OPIV_VXI_INSN(vand, 1, v); + DISASM_OPIV_VXI_INSN(vor, 1, v); + DISASM_OPIV_VXI_INSN(vxor, 1, v); + DISASM_OPIV_VXI_INSN(vrgather, 0, v); + DISASM_OPIV_V___INSN(vrgatherei16, 0); + DISASM_OPIV__XI_INSN(vslideup, 0); + DISASM_OPIV__XI_INSN(vslidedown, 0); + + //0b01_0000 + DISASM_OPIV_VXIM_INSN(vadc); + DISASM_OPIV_VX_M_INSN(vsbc); + DISASM_OPIV_VXIM_INSN(vmadc); + DISASM_OPIV_VXI_INSN(vmadc, 1, v); + DISASM_OPIV_VX_M_INSN(vmsbc); + DISASM_OPIV_VX__INSN(vmsbc, 1); + DISASM_OPIV_VXIM_INSN(vmerge); + DISASM_INSN("vmv.v.i", vmv_v_i, 0, {&vd, &v_simm5}); + DISASM_INSN("vmv.v.v", vmv_v_v, 0, {&vd, &vs1}); + DISASM_INSN("vmv.v.x", vmv_v_x, 0, {&vd, &xrs1}); + DISASM_OPIV_VXI_INSN(vmseq, 1, v); + DISASM_OPIV_VXI_INSN(vmsne, 1, v); + DISASM_OPIV_VX__INSN(vmsltu, 0); + DISASM_OPIV_VX__INSN(vmslt, 1); + DISASM_OPIV_VXI_INSN(vmsleu, 0, v); + DISASM_OPIV_VXI_INSN(vmsle, 1, v); + DISASM_OPIV__XI_INSN(vmsgtu, 0); + DISASM_OPIV__XI_INSN(vmsgt, 1); + + //0b10_0000 + DISASM_OPIV_VXI_INSN(vsaddu, 0, v); + DISASM_OPIV_VXI_INSN(vsadd, 1, v); + DISASM_OPIV_VX__INSN(vssubu, 0); + DISASM_OPIV_VX__INSN(vssub, 1); + DISASM_OPIV_VXI_INSN(vsll, 1, v); + DISASM_INSN("vmv1r.v", vmv1r_v, 0, {&vd, &vs2}); + DISASM_INSN("vmv2r.v", vmv2r_v, 0, {&vd, &vs2}); + DISASM_INSN("vmv4r.v", vmv4r_v, 0, {&vd, &vs2}); + DISASM_INSN("vmv8r.v", vmv8r_v, 0, {&vd, &vs2}); + DISASM_OPIV_VX__INSN(vsmul, 1); + DISASM_OPIV_VXI_INSN(vsrl, 0, v); + DISASM_OPIV_VXI_INSN(vsra, 0, v); + DISASM_OPIV_VXI_INSN(vssrl, 0, v); + DISASM_OPIV_VXI_INSN(vssra, 0, v); + DISASM_OPIV_VXI_INSN(vnsrl, 0, w); + DISASM_OPIV_VXI_INSN(vnsra, 0, w); + DISASM_OPIV_VXI_INSN(vnclipu, 0, w); + DISASM_OPIV_VXI_INSN(vnclip, 0, w); + + //0b11_0000 + DISASM_OPIV_S___INSN(vwredsumu, 0); + DISASM_OPIV_S___INSN(vwredsum, 1); + + //OPMVV/OPMVX + //0b00_0000 + DISASM_OPIV_VX__INSN(vaaddu, 0); + DISASM_OPIV_VX__INSN(vaadd, 0); + DISASM_OPIV_VX__INSN(vasubu, 0); + DISASM_OPIV_VX__INSN(vasub, 0); + + DISASM_OPIV_S___INSN(vredsum, 1); + 
DISASM_OPIV_S___INSN(vredand, 1); + DISASM_OPIV_S___INSN(vredor, 1); + DISASM_OPIV_S___INSN(vredxor, 1); + DISASM_OPIV_S___INSN(vredminu, 0); + DISASM_OPIV_S___INSN(vredmin, 1); + DISASM_OPIV_S___INSN(vredmaxu, 0); + DISASM_OPIV_S___INSN(vredmax, 1); + DISASM_OPIV__X__INSN(vslide1up, 1); + DISASM_OPIV__X__INSN(vslide1down,1); + + //0b01_0000 + //VWXUNARY0 + DISASM_INSN("vmv.x.s", vmv_x_s, 0, {&xrd, &vs2}); + DISASM_INSN("vcpop.m", vcpop_m, 0, {&xrd, &vs2, opt, &vm}); + DISASM_INSN("vfirst.m", vfirst_m, 0, {&xrd, &vs2, opt, &vm}); + + //VRXUNARY0 + DISASM_INSN("vmv.s.x", vmv_s_x, 0, {&vd, &xrs1}); + + //VXUNARY0 + DEFINE_VECTOR_V(vzext_vf2); + DEFINE_VECTOR_V(vsext_vf2); + DEFINE_VECTOR_V(vzext_vf4); + DEFINE_VECTOR_V(vsext_vf4); + DEFINE_VECTOR_V(vzext_vf8); + DEFINE_VECTOR_V(vsext_vf8); + + //VMUNARY0 + DEFINE_VECTOR_V(vmsbf_m); + DEFINE_VECTOR_V(vmsof_m); + DEFINE_VECTOR_V(vmsif_m); + DEFINE_VECTOR_V(viota_m); + DISASM_INSN("vid.v", vid_v, 0, {&vd, opt, &vm}); + + DISASM_INSN("vid.v", vid_v, 0, {&vd, opt, &vm}); + + DISASM_INSN("vcompress.vm", vcompress_vm, 0, {&vd, &vs2, &vs1}); + + DISASM_OPIV_M___INSN(vmandn, 1); + DISASM_OPIV_M___INSN(vmand, 1); + DISASM_OPIV_M___INSN(vmor, 1); + DISASM_OPIV_M___INSN(vmxor, 1); + DISASM_OPIV_M___INSN(vmorn, 1); + DISASM_OPIV_M___INSN(vmnand, 1); + DISASM_OPIV_M___INSN(vmnor, 1); + DISASM_OPIV_M___INSN(vmxnor, 1); + + //0b10_0000 + DISASM_OPIV_VX__INSN(vdivu, 0); + DISASM_OPIV_VX__INSN(vdiv, 1); + DISASM_OPIV_VX__INSN(vremu, 0); + DISASM_OPIV_VX__INSN(vrem, 1); + DISASM_OPIV_VX__INSN(vmulhu, 0); + DISASM_OPIV_VX__INSN(vmul, 1); + DISASM_OPIV_VX__INSN(vmulhsu, 0); + DISASM_OPIV_VX__INSN(vmulh, 1); + DISASM_OPIV_VX__INSN(vmadd, 1); + DISASM_OPIV_VX__INSN(vnmsub, 1); + DISASM_OPIV_VX__INSN(vmacc, 1); + DISASM_OPIV_VX__INSN(vnmsac, 1); + + //0b11_0000 + DISASM_OPIV_VX__INSN(vwaddu, 0); + DISASM_OPIV_VX__INSN(vwadd, 1); + DISASM_OPIV_VX__INSN(vwsubu, 0); + DISASM_OPIV_VX__INSN(vwsub, 1); + DISASM_OPIV_W___INSN(vwaddu, 0); + DISASM_OPIV_W___INSN(vwadd, 1); + DISASM_OPIV_W___INSN(vwsubu, 0); + DISASM_OPIV_W___INSN(vwsub, 1); + DISASM_OPIV_VX__INSN(vwmulu, 0); + DISASM_OPIV_VX__INSN(vwmulsu, 0); + DISASM_OPIV_VX__INSN(vwmul, 1); + DISASM_OPIV_VX__INSN(vwmaccu, 0); + DISASM_OPIV_VX__INSN(vwmacc, 1); + DISASM_OPIV__X__INSN(vwmaccus, 1); + DISASM_OPIV_VX__INSN(vwmaccsu, 0); + + #undef DISASM_OPIV_VXI_INSN + #undef DISASM_OPIV_VX__INSN + #undef DISASM_OPIV__XI_INSN + #undef DISASM_OPIV_V___INSN + #undef DISASM_OPIV_S___INSN + #undef DISASM_OPIV_W___INSN + #undef DISASM_OPIV_M___INSN + #undef DISASM_OPIV__X__INSN + #undef DISASM_OPIV_VXIM_INSN + #undef DISASM_OPIV_VX_M_INSN + + #define DISASM_OPIV_VF_INSN(name) \ + DEFINE_VECTOR_VV(name##_vv); \ + DEFINE_VECTOR_VF(name##_vf) + + #define DISASM_OPIV_WF_INSN(name) \ + DEFINE_VECTOR_VV(name##_wv); \ + DEFINE_VECTOR_VF(name##_wf) + + #define DISASM_OPIV_S__INSN(name) \ + DEFINE_VECTOR_VV(name##_vs) + + #define DISASM_OPIV__F_INSN(name) \ + DEFINE_VECTOR_VF(name##_vf) + + #define DISASM_VFUNARY0_INSN(name, suf) \ + DEFINE_VECTOR_V(name##cvt_rtz_xu_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_rtz_x_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_xu_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_x_f_##suf); \ + DEFINE_VECTOR_V(name##cvt_f_xu_##suf); \ + DEFINE_VECTOR_V(name##cvt_f_x_##suf) + + //OPFVV/OPFVF + //0b00_0000 + DISASM_OPIV_VF_INSN(vfadd); + DISASM_OPIV_S__INSN(vfredusum); + DISASM_OPIV_VF_INSN(vfsub); + DISASM_OPIV_S__INSN(vfredosum); + DISASM_OPIV_VF_INSN(vfmin); + DISASM_OPIV_S__INSN(vfredmin); + DISASM_OPIV_VF_INSN(vfmax); + 
DISASM_OPIV_S__INSN(vfredmax); + DISASM_OPIV_VF_INSN(vfsgnj); + DISASM_OPIV_VF_INSN(vfsgnjn); + DISASM_OPIV_VF_INSN(vfsgnjx); + DISASM_INSN("vfmv.f.s", vfmv_f_s, 0, {&frd, &vs2}); + DISASM_INSN("vfmv.s.f", vfmv_s_f, mask_vfmv_s_f, {&vd, &frs1}); + DISASM_OPIV__F_INSN(vfslide1up); + DISASM_OPIV__F_INSN(vfslide1down); + + //0b01_0000 + DISASM_INSN("vfmerge.vfm", vfmerge_vfm, 0, {&vd, &vs2, &frs1, &v0}); + DISASM_INSN("vfmv.v.f", vfmv_v_f, 0, {&vd, &frs1}); + DISASM_OPIV_VF_INSN(vmfeq); + DISASM_OPIV_VF_INSN(vmfle); + DISASM_OPIV_VF_INSN(vmflt); + DISASM_OPIV_VF_INSN(vmfne); + DISASM_OPIV__F_INSN(vmfgt); + DISASM_OPIV__F_INSN(vmfge); + + //0b10_0000 + DISASM_OPIV_VF_INSN(vfdiv); + DISASM_OPIV__F_INSN(vfrdiv); + + //vfunary0 + DISASM_VFUNARY0_INSN(vf, v); + DISASM_VFUNARY0_INSN(vfw, v); + DEFINE_VECTOR_V(vfwcvt_f_f_v); + + DISASM_VFUNARY0_INSN(vfn, w); + DEFINE_VECTOR_V(vfncvt_f_f_w); + DEFINE_VECTOR_V(vfncvt_rod_f_f_w); + + //vfunary1 + DEFINE_VECTOR_V(vfsqrt_v); + DEFINE_VECTOR_V(vfrsqrt7_v); + DEFINE_VECTOR_V(vfrec7_v); + DEFINE_VECTOR_V(vfclass_v); + + DISASM_OPIV_VF_INSN(vfmul); + DISASM_OPIV__F_INSN(vfrsub); + DISASM_OPIV_VF_INSN(vfmadd); + DISASM_OPIV_VF_INSN(vfnmadd); + DISASM_OPIV_VF_INSN(vfmsub); + DISASM_OPIV_VF_INSN(vfnmsub); + DISASM_OPIV_VF_INSN(vfmacc); + DISASM_OPIV_VF_INSN(vfnmacc); + DISASM_OPIV_VF_INSN(vfmsac); + DISASM_OPIV_VF_INSN(vfnmsac); + + //0b11_0000 + DISASM_OPIV_VF_INSN(vfwadd); + DISASM_OPIV_S__INSN(vfwredusum); + DISASM_OPIV_VF_INSN(vfwsub); + DISASM_OPIV_S__INSN(vfwredosum); + DISASM_OPIV_WF_INSN(vfwadd); + DISASM_OPIV_WF_INSN(vfwsub); + DISASM_OPIV_VF_INSN(vfwmul); + DISASM_OPIV_VF_INSN(vfwmacc); + DISASM_OPIV_VF_INSN(vfwnmacc); + DISASM_OPIV_VF_INSN(vfwmsac); + DISASM_OPIV_VF_INSN(vfwnmsac); + + #undef DISASM_OPIV_VF_INSN + #undef DISASM_OPIV__F_INSN + #undef DISASM_OPIV_S__INSN + #undef DISASM_OPIV_W__INSN + #undef DISASM_VFUNARY0_INSN + + // vector amo + std::vector v_fmt_amo_wd = {&vd, &v_address, &vs2, &vd, opt, &vm}; + std::vector v_fmt_amo = {&x0, &v_address, &vs2, &vd, opt, &vm}; + for (size_t elt = 0; elt <= 3; ++elt) { + const custom_fmt_t template_insn[] = { + {match_vamoaddei8_v | mask_wd, mask_vamoaddei8_v | mask_wd, + "%sei%d.v", v_fmt_amo_wd}, + {match_vamoaddei8_v, mask_vamoaddei8_v | mask_wd, + "%sei%d.v", v_fmt_amo}, + }; + std::pair amo_map[] = { + {"vamoswap", 0x01ul << 27}, + {"vamoadd", 0x00ul << 27}, + {"vamoxor", 0x04ul << 27}, + {"vamoand", 0x0cul << 27}, + {"vamoor", 0x08ul << 27}, + {"vamomin", 0x10ul << 27}, + {"vamomax", 0x14ul << 27}, + {"vamominu", 0x18ul << 27}, + {"vamomaxu", 0x1cul << 27}}; + const reg_t elt_map[] = {0x0ul << 12, 0x5ul << 12, + 0x6ul <<12, 0x7ul << 12}; + + for (size_t idx = 0; idx < sizeof(amo_map) / sizeof(amo_map[0]); ++idx) { + for (auto item : template_insn) { + char buf[128]; + sprintf(buf, item.fmt, amo_map[idx].first, 8 << elt); + add_insn(new disasm_insn_t(buf, + item.match | amo_map[idx].second | elt_map[elt], + item.mask, + item.arg)); + } + } + } + } + +#define DEFINE_PI3TYPE(code) add_pitype3_insn(this, #code, match_##code, mask_##code); +#define DEFINE_PI4TYPE(code) add_pitype4_insn(this, #code, match_##code, mask_##code); +#define DEFINE_PI5TYPE(code) add_pitype5_insn(this, #code, match_##code, mask_##code); +#define DEFINE_PI6TYPE(code) add_pitype6_insn(this, #code, match_##code, mask_##code); + +#define DISASM_8_AND_16_RINSN(code) \ + DEFINE_RTYPE(code##8); \ + DEFINE_RTYPE(code##16); + +#define DISASM_8_AND_16_RINSN_ROUND(code) \ + DEFINE_RTYPE(code##8_u); \ + DEFINE_RTYPE(code##16_u); + 
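+// Note: each DISASM_8_AND_16_* helper registers both the 8-bit and 16-bit
+// element variants of a packed-SIMD (P-extension) mnemonic in one line. For
+// example, given the expansion above, DISASM_8_AND_16_RINSN(add) is in effect
+//   DEFINE_RTYPE(add8);
+//   DEFINE_RTYPE(add16);
+// so the per-extension blocks below only need to name each base mnemonic once.
+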
+#define DISASM_8_AND_16_PIINSN(code) \ + DEFINE_PI3TYPE(code##8); \ + DEFINE_PI4TYPE(code##16); + +#define DISASM_8_AND_16_PIINSN_ROUND(code) \ + DEFINE_PI3TYPE(code##8_u); \ + DEFINE_PI4TYPE(code##16_u); + +#define DISASM_RINSN_AND_ROUND(code) \ + DEFINE_RTYPE(code); \ + DEFINE_RTYPE(code##_u); \ + + if (isa->extension_enabled(EXT_ZMMUL)) { + DEFINE_RTYPE(mul); + DEFINE_RTYPE(mulh); + DEFINE_RTYPE(mulhu); + DEFINE_RTYPE(mulhsu); + DEFINE_RTYPE(mulw); + } + + if (isa->extension_enabled(EXT_ZBPBO)) { + DEFINE_RTYPE(min); + DEFINE_RTYPE(max); + DEFINE_R3TYPE(cmix); + DEFINE_RTYPE(pack); + DEFINE_RTYPE(packu); + add_insn(new disasm_insn_t("rev", match_grevi | ((isa->get_max_xlen() - 1) << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); + add_insn(new disasm_insn_t("rev8.h", match_grevi | (0x8 << imm_shift), mask_grevi | mask_imm, {&xrd, &xrs1})); // swap16 + if (isa->get_max_xlen() == 32) { + DEFINE_R1TYPE(clz); + DEFINE_R3TYPE(fsr); + DEFINE_R3TYPE(fsri); + } else { + DEFINE_R3TYPE(fsrw); + } + } + + if (isa->extension_enabled(EXT_ZPSFOPERAND)) { + DEFINE_RTYPE(smal) + DEFINE_RTYPE(radd64); + DEFINE_RTYPE(uradd64); + DEFINE_RTYPE(kadd64); + DEFINE_RTYPE(ukadd64); + DEFINE_RTYPE(rsub64); + DEFINE_RTYPE(ursub64); + DEFINE_RTYPE(ksub64); + DEFINE_RTYPE(uksub64); + DEFINE_RTYPE(smar64); + DEFINE_RTYPE(smsr64); + DEFINE_RTYPE(umar64); + DEFINE_RTYPE(umsr64); + DEFINE_RTYPE(kmar64); + DEFINE_RTYPE(kmsr64); + DEFINE_RTYPE(ukmar64); + DEFINE_RTYPE(ukmsr64); + DEFINE_RTYPE(smalbb); + DEFINE_RTYPE(smalbt); + DEFINE_RTYPE(smaltt); + DEFINE_RTYPE(smalda); + DEFINE_RTYPE(smalxda); + DEFINE_RTYPE(smalds); + DEFINE_RTYPE(smaldrs); + DEFINE_RTYPE(smalxds); + DEFINE_RTYPE(smslda); + DEFINE_RTYPE(smslxda); + DEFINE_RTYPE(mulr64); + DEFINE_RTYPE(mulsr64); + if (isa->get_max_xlen() == 32) { + DEFINE_RTYPE(add64); + DEFINE_RTYPE(sub64); + } + } + + if (isa->extension_enabled(EXT_ZPN)) { + DISASM_8_AND_16_RINSN(add); + DISASM_8_AND_16_RINSN(radd); + DISASM_8_AND_16_RINSN(uradd); + DISASM_8_AND_16_RINSN(kadd); + DISASM_8_AND_16_RINSN(ukadd); + DISASM_8_AND_16_RINSN(sub); + DISASM_8_AND_16_RINSN(rsub); + DISASM_8_AND_16_RINSN(ursub); + DISASM_8_AND_16_RINSN(ksub); + DISASM_8_AND_16_RINSN(uksub); + DEFINE_RTYPE(cras16); + DEFINE_RTYPE(rcras16); + DEFINE_RTYPE(urcras16); + DEFINE_RTYPE(kcras16); + DEFINE_RTYPE(ukcras16); + DEFINE_RTYPE(crsa16); + DEFINE_RTYPE(rcrsa16); + DEFINE_RTYPE(urcrsa16); + DEFINE_RTYPE(kcrsa16); + DEFINE_RTYPE(ukcrsa16); + DEFINE_RTYPE(stas16); + DEFINE_RTYPE(rstas16); + DEFINE_RTYPE(urstas16); + DEFINE_RTYPE(kstas16); + DEFINE_RTYPE(ukstas16); + DEFINE_RTYPE(stsa16); + DEFINE_RTYPE(rstsa16); + DEFINE_RTYPE(urstsa16); + DEFINE_RTYPE(kstsa16); + DEFINE_RTYPE(ukstsa16); + DISASM_8_AND_16_RINSN(sra); + DISASM_8_AND_16_RINSN(srl); + DISASM_8_AND_16_RINSN(sll); + DISASM_8_AND_16_RINSN(ksll); + DISASM_8_AND_16_RINSN(kslra); + DISASM_8_AND_16_PIINSN(srai); + DISASM_8_AND_16_PIINSN(srli); + DISASM_8_AND_16_PIINSN(slli); + DISASM_8_AND_16_PIINSN(kslli); + DISASM_8_AND_16_RINSN_ROUND(sra); + DISASM_8_AND_16_RINSN_ROUND(srl); + DISASM_8_AND_16_RINSN_ROUND(kslra); + DISASM_8_AND_16_PIINSN_ROUND(srai); + DISASM_8_AND_16_PIINSN_ROUND(srli); + + DISASM_8_AND_16_RINSN(cmpeq); + DISASM_8_AND_16_RINSN(scmplt); + DISASM_8_AND_16_RINSN(scmple); + DISASM_8_AND_16_RINSN(ucmplt); + DISASM_8_AND_16_RINSN(ucmple); + + DISASM_8_AND_16_RINSN(smul); + DISASM_8_AND_16_RINSN(smulx); + DISASM_8_AND_16_RINSN(umul); + DISASM_8_AND_16_RINSN(umulx); + DISASM_8_AND_16_RINSN(khm); + DISASM_8_AND_16_RINSN(khmx); + + 
DISASM_8_AND_16_RINSN(smin); + DISASM_8_AND_16_RINSN(umin); + DISASM_8_AND_16_RINSN(smax); + DISASM_8_AND_16_RINSN(umax); + DISASM_8_AND_16_PIINSN(sclip); + DISASM_8_AND_16_PIINSN(uclip); + DEFINE_R1TYPE(kabs16); + DEFINE_R1TYPE(clrs16); + DEFINE_R1TYPE(clz16); + DEFINE_R1TYPE(kabs8); + DEFINE_R1TYPE(clrs8); + DEFINE_R1TYPE(clz8); + + DEFINE_R1TYPE(sunpkd810); + DEFINE_R1TYPE(sunpkd820); + DEFINE_R1TYPE(sunpkd830); + DEFINE_R1TYPE(sunpkd831); + DEFINE_R1TYPE(sunpkd832); + DEFINE_R1TYPE(zunpkd810); + DEFINE_R1TYPE(zunpkd820); + DEFINE_R1TYPE(zunpkd830); + DEFINE_R1TYPE(zunpkd831); + DEFINE_R1TYPE(zunpkd832); + + DEFINE_RTYPE(pkbb16); + DEFINE_RTYPE(pkbt16); + DEFINE_RTYPE(pktb16); + DEFINE_RTYPE(pktt16); + DISASM_RINSN_AND_ROUND(smmul); + DISASM_RINSN_AND_ROUND(kmmac); + DISASM_RINSN_AND_ROUND(kmmsb); + DISASM_RINSN_AND_ROUND(kwmmul); + DISASM_RINSN_AND_ROUND(smmwb); + DISASM_RINSN_AND_ROUND(smmwt); + DISASM_RINSN_AND_ROUND(kmmawb); + DISASM_RINSN_AND_ROUND(kmmawt); + DISASM_RINSN_AND_ROUND(kmmwb2); + DISASM_RINSN_AND_ROUND(kmmwt2); + DISASM_RINSN_AND_ROUND(kmmawb2); + DISASM_RINSN_AND_ROUND(kmmawt2); + DEFINE_RTYPE(smbb16) + DEFINE_RTYPE(smbt16) + DEFINE_RTYPE(smtt16) + DEFINE_RTYPE(kmda) + DEFINE_RTYPE(kmxda) + DEFINE_RTYPE(smds) + DEFINE_RTYPE(smdrs) + DEFINE_RTYPE(smxds) + DEFINE_RTYPE(kmabb) + DEFINE_RTYPE(kmabt) + DEFINE_RTYPE(kmatt) + DEFINE_RTYPE(kmada) + DEFINE_RTYPE(kmaxda) + DEFINE_RTYPE(kmads) + DEFINE_RTYPE(kmadrs) + DEFINE_RTYPE(kmaxds) + DEFINE_RTYPE(kmsda) + DEFINE_RTYPE(kmsxda) + DEFINE_RTYPE(sclip32) + DEFINE_RTYPE(uclip32) + DEFINE_R1TYPE(clrs32); + DEFINE_R1TYPE(clz32); + DEFINE_RTYPE(pbsad); + DEFINE_RTYPE(pbsada); + DEFINE_RTYPE(smaqa); + DEFINE_RTYPE(umaqa); + DEFINE_RTYPE(smaqa_su); + + DEFINE_RTYPE(kaddh); + DEFINE_RTYPE(ksubh); + DEFINE_RTYPE(khmbb); + DEFINE_RTYPE(khmbt); + DEFINE_RTYPE(khmtt); + DEFINE_RTYPE(ukaddh); + DEFINE_RTYPE(uksubh); + DEFINE_RTYPE(kaddw); + DEFINE_RTYPE(ukaddw); + DEFINE_RTYPE(ksubw); + DEFINE_RTYPE(uksubw); + DEFINE_RTYPE(kdmbb); + DEFINE_RTYPE(kdmbt); + DEFINE_RTYPE(kdmtt); + DEFINE_RTYPE(kslraw); + DEFINE_RTYPE(kslraw_u); + DEFINE_RTYPE(ksllw); + DEFINE_PI5TYPE(kslliw); + DEFINE_RTYPE(kdmabb); + DEFINE_RTYPE(kdmabt); + DEFINE_RTYPE(kdmatt); + DEFINE_RTYPE(kabsw); + DEFINE_RTYPE(raddw); + DEFINE_RTYPE(uraddw); + DEFINE_RTYPE(rsubw); + DEFINE_RTYPE(ursubw); + DEFINE_RTYPE(msubr32); + DEFINE_RTYPE(ave); + DEFINE_RTYPE(sra_u); + DEFINE_PI5TYPE(srai_u); + DEFINE_PI3TYPE(insb); + DEFINE_RTYPE(maddr32) + + if (isa->get_max_xlen() == 64) { + DEFINE_RTYPE(add32); + DEFINE_RTYPE(radd32); + DEFINE_RTYPE(uradd32); + DEFINE_RTYPE(kadd32); + DEFINE_RTYPE(ukadd32); + DEFINE_RTYPE(sub32); + DEFINE_RTYPE(rsub32); + DEFINE_RTYPE(ursub32); + DEFINE_RTYPE(ksub32); + DEFINE_RTYPE(uksub32); + DEFINE_RTYPE(cras32); + DEFINE_RTYPE(rcras32); + DEFINE_RTYPE(urcras32); + DEFINE_RTYPE(kcras32); + DEFINE_RTYPE(ukcras32); + DEFINE_RTYPE(crsa32); + DEFINE_RTYPE(rcrsa32); + DEFINE_RTYPE(urcrsa32); + DEFINE_RTYPE(kcrsa32); + DEFINE_RTYPE(ukcrsa32); + DEFINE_RTYPE(stas32); + DEFINE_RTYPE(rstas32); + DEFINE_RTYPE(urstas32); + DEFINE_RTYPE(kstas32); + DEFINE_RTYPE(ukstas32); + DEFINE_RTYPE(stsa32); + DEFINE_RTYPE(rstsa32); + DEFINE_RTYPE(urstsa32); + DEFINE_RTYPE(kstsa32); + DEFINE_RTYPE(ukstsa32); + DEFINE_RTYPE(sra32); + DEFINE_PI5TYPE(srai32); + DEFINE_RTYPE(sra32_u); + DEFINE_PI5TYPE(srai32_u); + DEFINE_RTYPE(srl32); + DEFINE_PI5TYPE(srli32); + DEFINE_RTYPE(srl32_u); + DEFINE_PI5TYPE(srli32_u); + DEFINE_RTYPE(sll32); + DEFINE_PI5TYPE(slli32); + DEFINE_RTYPE(ksll32); 
+ DEFINE_PI5TYPE(kslli32); + DEFINE_RTYPE(kslra32); + DEFINE_RTYPE(kslra32_u); + DEFINE_RTYPE(smin32); + DEFINE_RTYPE(umin32); + DEFINE_RTYPE(smax32); + DEFINE_RTYPE(umax32); + DEFINE_R1TYPE(kabs32); + DEFINE_RTYPE(khmbb16); + DEFINE_RTYPE(khmbt16); + DEFINE_RTYPE(khmtt16); + DEFINE_RTYPE(kdmbb16); + DEFINE_RTYPE(kdmbt16); + DEFINE_RTYPE(kdmtt16); + DEFINE_RTYPE(kdmabb16); + DEFINE_RTYPE(kdmabt16); + DEFINE_RTYPE(kdmatt16); + DEFINE_RTYPE(smbt32); + DEFINE_RTYPE(smtt32); + DEFINE_RTYPE(kmabb32); + DEFINE_RTYPE(kmabt32); + DEFINE_RTYPE(kmatt32); + DEFINE_RTYPE(kmda32); + DEFINE_RTYPE(kmxda32); + DEFINE_RTYPE(kmaxda32); + DEFINE_RTYPE(kmads32); + DEFINE_RTYPE(kmadrs32); + DEFINE_RTYPE(kmaxds32); + DEFINE_RTYPE(kmsda32); + DEFINE_RTYPE(kmsxda32); + DEFINE_RTYPE(smds32); + DEFINE_RTYPE(smdrs32); + DEFINE_RTYPE(smxds32); + DEFINE_PI5TYPE(sraiw_u); + DEFINE_RTYPE(pkbb32); + DEFINE_RTYPE(pkbt32); + DEFINE_RTYPE(pktb32); + DEFINE_RTYPE(pktt32); + } + } + + if (isa->extension_enabled(EXT_XZBP)) { + DEFINE_ITYPE_SHIFT(grevi); + DEFINE_ITYPE_SHIFT(gorci); + DEFINE_RTYPE(pack); + DEFINE_RTYPE(packh); + DEFINE_RTYPE(packu); + DEFINE_RTYPE(grev); + DEFINE_RTYPE(gorc); + DEFINE_RTYPE(xperm4); + DEFINE_RTYPE(xperm8); + DEFINE_RTYPE(xperm16); + DEFINE_RTYPE(xperm32); + } + + if (isa->extension_enabled(EXT_XZBP) || + isa->extension_enabled(EXT_XZBE) || + isa->extension_enabled(EXT_XZBF)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(packw); + } + } + + if (isa->extension_enabled(EXT_XZBT)) { + DEFINE_R3TYPE(cmix); + DEFINE_R3TYPE(fsr); + DEFINE_R3TYPE(fsri); + if(isa->get_max_xlen() == 64) { + DEFINE_R3TYPE(fsriw); + DEFINE_R3TYPE(fsrw); + } + } + + if (isa->extension_enabled(EXT_ZICBOM)) { + DISASM_INSN("cbo.clean", cbo_clean, 0, {&xrs1}); + DISASM_INSN("cbo.flush", cbo_flush, 0, {&xrs1}); + DISASM_INSN("cbo.inval", cbo_inval, 0, {&xrs1}); + } + + if (isa->extension_enabled(EXT_ZICBOZ)) { + DISASM_INSN("cbo.zero", cbo_zero, 0, {&xrs1}); + } + + if (isa->extension_enabled(EXT_ZKND) || + isa->extension_enabled(EXT_ZKNE)) { + DISASM_INSN("aes64ks1i", aes64ks1i, 0, {&xrd, &xrs1, &rcon}); + DEFINE_RTYPE(aes64ks2); + } + + if (isa->extension_enabled(EXT_ZKND)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(aes64ds); + DEFINE_RTYPE(aes64dsm); + DEFINE_R1TYPE(aes64im); + } else if (isa->get_max_xlen() == 32) { + DISASM_INSN("aes32dsi", aes32dsi, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("aes32dsmi", aes32dsmi, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + } + + if (isa->extension_enabled(EXT_ZKNE)) { + if(isa->get_max_xlen() == 64) { + DEFINE_RTYPE(aes64es); + DEFINE_RTYPE(aes64esm); + } else if (isa->get_max_xlen() == 32) { + DISASM_INSN("aes32esi", aes32esi, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("aes32esmi", aes32esmi, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + } + + if (isa->extension_enabled(EXT_ZKNH)) { + DEFINE_R1TYPE(sha256sig0); + DEFINE_R1TYPE(sha256sig1); + DEFINE_R1TYPE(sha256sum0); + DEFINE_R1TYPE(sha256sum1); + if(isa->get_max_xlen() == 64) { + DEFINE_R1TYPE(sha512sig0); + DEFINE_R1TYPE(sha512sig1); + DEFINE_R1TYPE(sha512sum0); + DEFINE_R1TYPE(sha512sum1); + } else if (isa->get_max_xlen() == 32) { + DEFINE_RTYPE(sha512sig0h); + DEFINE_RTYPE(sha512sig0l); + DEFINE_RTYPE(sha512sig1h); + DEFINE_RTYPE(sha512sig1l); + DEFINE_RTYPE(sha512sum0r); + DEFINE_RTYPE(sha512sum1r); + } + } + + if (isa->extension_enabled(EXT_ZKSED)) { + DISASM_INSN("sm4ed", sm4ed, 0, {&xrd, &xrs1, &xrs2, &bs}); + DISASM_INSN("sm4ks", sm4ks, 0, {&xrd, &xrs1, &xrs2, &bs}); + } + + if (isa->extension_enabled(EXT_ZKSH)) { + 
DEFINE_R1TYPE(sm3p0); + DEFINE_R1TYPE(sm3p1); + } + +} + +disassembler_t::disassembler_t(const isa_parser_t *isa) +{ + // highest priority: instructions explicitly enabled + add_instructions(isa); + + // next-highest priority: other instructions in same base ISA + std::string fallback_isa_string = std::string("rv") + std::to_string(isa->get_max_xlen()) + + "gcv_zfh_zba_zbb_zbc_zbs_zkn_zkr_zks_xbitmanip"; + isa_parser_t fallback_isa(fallback_isa_string.c_str(), DEFAULT_PRIV); + add_instructions(&fallback_isa); + + // finally: instructions with known opcodes but unknown arguments + add_unknown_insns(this); +} + +const disasm_insn_t* disassembler_t::probe_once(insn_t insn, size_t idx) const +{ + for (size_t j = 0; j < chain[idx].size(); j++) + if(*chain[idx][j] == insn) + return chain[idx][j]; + + return NULL; +} + +const disasm_insn_t* disassembler_t::lookup(insn_t insn) const +{ + if (auto p = probe_once(insn, hash(insn.bits(), MASK1))) + return p; + + if (auto p = probe_once(insn, hash(insn.bits(), MASK2))) + return p; + + return probe_once(insn, HASH_SIZE); +} + +void NOINLINE disassembler_t::add_insn(disasm_insn_t* insn) +{ + size_t idx = + (insn->get_mask() & MASK1) == MASK1 ? hash(insn->get_match(), MASK1) : + (insn->get_mask() & MASK2) == MASK2 ? hash(insn->get_match(), MASK2) : + HASH_SIZE; + + chain[idx].push_back(insn); +} + +disassembler_t::~disassembler_t() +{ + for (size_t i = 0; i < HASH_SIZE+1; i++) + for (size_t j = 0; j < chain[i].size(); j++) + delete chain[i][j]; +} diff --git a/vendor/riscv-isa-sim/disasm/disasm.mk.in b/vendor/riscv-isa-sim/disasm/disasm.mk.in new file mode 100644 index 00000000..9eafb12f --- /dev/null +++ b/vendor/riscv-isa-sim/disasm/disasm.mk.in @@ -0,0 +1,5 @@ +disasm_srcs = \ + disasm.cc \ + regnames.cc \ + +disasm_install_lib = yes diff --git a/vendor/riscv-isa-sim/disasm/regnames.cc b/vendor/riscv-isa-sim/disasm/regnames.cc new file mode 100644 index 00000000..0a7fd4d2 --- /dev/null +++ b/vendor/riscv-isa-sim/disasm/regnames.cc @@ -0,0 +1,33 @@ +// See LICENSE for license details. + +#include "disasm.h" + +const char* xpr_name[] = { + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", + "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6" +}; + +const char* fpr_name[] = { + "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", + "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", + "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", + "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11" +}; + +const char* vr_name[] = { + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" +}; + +const char* csr_name(int which) { + switch (which) { + #define DECLARE_CSR(name, number) case number: return #name; + #include "encoding.h" + #undef DECLARE_CSR + } + return "unknown-csr"; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt.ac b/vendor/riscv-isa-sim/fdt/fdt.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/fdt/fdt.c b/vendor/riscv-isa-sim/fdt/fdt.c new file mode 100644 index 00000000..16fd0612 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include "fdt.h" +#include "libfdt.h" + +#include "libfdt_internal.h" + +/* + * Minimal sanity check for a read-only tree. fdt_ro_probe_() checks + * that the given buffer contains what appears to be a flattened + * device tree with sane information in its header. + */ +int32_t fdt_ro_probe_(const void *fdt) +{ + uint32_t totalsize = fdt_totalsize(fdt); + + if (fdt_magic(fdt) == FDT_MAGIC) { + /* Complete tree */ + if (fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION) + return -FDT_ERR_BADVERSION; + if (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION) + return -FDT_ERR_BADVERSION; + } else if (fdt_magic(fdt) == FDT_SW_MAGIC) { + /* Unfinished sequential-write blob */ + if (fdt_size_dt_struct(fdt) == 0) + return -FDT_ERR_BADSTATE; + } else { + return -FDT_ERR_BADMAGIC; + } + + if (totalsize < INT32_MAX) + return totalsize; + else + return -FDT_ERR_TRUNCATED; +} + +static int check_off_(uint32_t hdrsize, uint32_t totalsize, uint32_t off) +{ + return (off >= hdrsize) && (off <= totalsize); +} + +static int check_block_(uint32_t hdrsize, uint32_t totalsize, + uint32_t base, uint32_t size) +{ + if (!check_off_(hdrsize, totalsize, base)) + return 0; /* block start out of bounds */ + if ((base + size) < base) + return 0; /* overflow */ + if (!check_off_(hdrsize, totalsize, base + size)) + return 0; /* block end out of bounds */ + return 1; +} + +size_t fdt_header_size_(uint32_t version) +{ + if (version <= 1) + return FDT_V1_SIZE; + else if (version <= 2) + return FDT_V2_SIZE; + else if (version <= 3) + return FDT_V3_SIZE; + else if (version <= 16) + return FDT_V16_SIZE; + else + return FDT_V17_SIZE; +} + +int fdt_check_header(const void *fdt) +{ + size_t hdrsize; + + if (fdt_magic(fdt) != FDT_MAGIC) + return -FDT_ERR_BADMAGIC; + hdrsize = fdt_header_size(fdt); + if ((fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION) + || (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION)) + return -FDT_ERR_BADVERSION; + if (fdt_version(fdt) < fdt_last_comp_version(fdt)) + return -FDT_ERR_BADVERSION; + + if ((fdt_totalsize(fdt) < hdrsize) + || (fdt_totalsize(fdt) > INT_MAX)) + return -FDT_ERR_TRUNCATED; + + /* Bounds check memrsv block */ + if (!check_off_(hdrsize, fdt_totalsize(fdt), fdt_off_mem_rsvmap(fdt))) + return -FDT_ERR_TRUNCATED; + + /* Bounds check structure block */ + if (fdt_version(fdt) < 17) { + if (!check_off_(hdrsize, fdt_totalsize(fdt), + fdt_off_dt_struct(fdt))) + return -FDT_ERR_TRUNCATED; + } else { + if (!check_block_(hdrsize, fdt_totalsize(fdt), + fdt_off_dt_struct(fdt), + fdt_size_dt_struct(fdt))) + return -FDT_ERR_TRUNCATED; + } + + /* Bounds check strings block */ + if (!check_block_(hdrsize, fdt_totalsize(fdt), + fdt_off_dt_strings(fdt), fdt_size_dt_strings(fdt))) + return -FDT_ERR_TRUNCATED; + + return 0; +} + +const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len) +{ + unsigned absoffset = offset + fdt_off_dt_struct(fdt); + + if ((absoffset < offset) + || ((absoffset + len) < absoffset) + || (absoffset + len) > fdt_totalsize(fdt)) + return NULL; + + if (fdt_version(fdt) >= 0x11) + if (((offset + len) < offset) + || ((offset + len) > fdt_size_dt_struct(fdt))) + return NULL; + + return fdt_offset_ptr_(fdt, offset); +} + +uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset) +{ + const fdt32_t *tagp, *lenp; + uint32_t tag; + int offset = startoffset; + const char *p; + + *nextoffset = -FDT_ERR_TRUNCATED; + tagp = fdt_offset_ptr(fdt, offset, FDT_TAGSIZE); + if (!tagp) + return FDT_END; /* premature end */ + 
tag = fdt32_to_cpu(*tagp); + offset += FDT_TAGSIZE; + + *nextoffset = -FDT_ERR_BADSTRUCTURE; + switch (tag) { + case FDT_BEGIN_NODE: + /* skip name */ + do { + p = fdt_offset_ptr(fdt, offset++, 1); + } while (p && (*p != '\0')); + if (!p) + return FDT_END; /* premature end */ + break; + + case FDT_PROP: + lenp = fdt_offset_ptr(fdt, offset, sizeof(*lenp)); + if (!lenp) + return FDT_END; /* premature end */ + /* skip-name offset, length and value */ + offset += sizeof(struct fdt_property) - FDT_TAGSIZE + + fdt32_to_cpu(*lenp); + if (fdt_version(fdt) < 0x10 && fdt32_to_cpu(*lenp) >= 8 && + ((offset - fdt32_to_cpu(*lenp)) % 8) != 0) + offset += 4; + break; + + case FDT_END: + case FDT_END_NODE: + case FDT_NOP: + break; + + default: + return FDT_END; + } + + if (!fdt_offset_ptr(fdt, startoffset, offset - startoffset)) + return FDT_END; /* premature end */ + + *nextoffset = FDT_TAGALIGN(offset); + return tag; +} + +int fdt_check_node_offset_(const void *fdt, int offset) +{ + if ((offset < 0) || (offset % FDT_TAGSIZE) + || (fdt_next_tag(fdt, offset, &offset) != FDT_BEGIN_NODE)) + return -FDT_ERR_BADOFFSET; + + return offset; +} + +int fdt_check_prop_offset_(const void *fdt, int offset) +{ + if ((offset < 0) || (offset % FDT_TAGSIZE) + || (fdt_next_tag(fdt, offset, &offset) != FDT_PROP)) + return -FDT_ERR_BADOFFSET; + + return offset; +} + +int fdt_next_node(const void *fdt, int offset, int *depth) +{ + int nextoffset = 0; + uint32_t tag; + + if (offset >= 0) + if ((nextoffset = fdt_check_node_offset_(fdt, offset)) < 0) + return nextoffset; + + do { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + + switch (tag) { + case FDT_PROP: + case FDT_NOP: + break; + + case FDT_BEGIN_NODE: + if (depth) + (*depth)++; + break; + + case FDT_END_NODE: + if (depth && ((--(*depth)) < 0)) + return nextoffset; + break; + + case FDT_END: + if ((nextoffset >= 0) + || ((nextoffset == -FDT_ERR_TRUNCATED) && !depth)) + return -FDT_ERR_NOTFOUND; + else + return nextoffset; + } + } while (tag != FDT_BEGIN_NODE); + + return offset; +} + +int fdt_first_subnode(const void *fdt, int offset) +{ + int depth = 0; + + offset = fdt_next_node(fdt, offset, &depth); + if (offset < 0 || depth != 1) + return -FDT_ERR_NOTFOUND; + + return offset; +} + +int fdt_next_subnode(const void *fdt, int offset) +{ + int depth = 1; + + /* + * With respect to the parent, the depth of the next subnode will be + * the same as the last. + */ + do { + offset = fdt_next_node(fdt, offset, &depth); + if (offset < 0 || depth < 1) + return -FDT_ERR_NOTFOUND; + } while (depth > 1); + + return offset; +} + +const char *fdt_find_string_(const char *strtab, int tabsize, const char *s) +{ + int len = strlen(s) + 1; + const char *last = strtab + tabsize - len; + const char *p; + + for (p = strtab; p <= last; p++) + if (memcmp(p, s, len) == 0) + return p; + return NULL; +} + +int fdt_move(const void *fdt, void *buf, int bufsize) +{ + FDT_RO_PROBE(fdt); + + if (fdt_totalsize(fdt) > bufsize) + return -FDT_ERR_NOSPACE; + + memmove(buf, fdt, fdt_totalsize(fdt)); + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt.h b/vendor/riscv-isa-sim/fdt/fdt.h new file mode 100644 index 00000000..f2e68807 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +#ifndef FDT_H +#define FDT_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * Copyright 2012 Kim Phillips, Freescale Semiconductor. 
+ */ + +#ifndef __ASSEMBLY__ + +struct fdt_header { + fdt32_t magic; /* magic word FDT_MAGIC */ + fdt32_t totalsize; /* total size of DT block */ + fdt32_t off_dt_struct; /* offset to structure */ + fdt32_t off_dt_strings; /* offset to strings */ + fdt32_t off_mem_rsvmap; /* offset to memory reserve map */ + fdt32_t version; /* format version */ + fdt32_t last_comp_version; /* last compatible version */ + + /* version 2 fields below */ + fdt32_t boot_cpuid_phys; /* Which physical CPU id we're + booting on */ + /* version 3 fields below */ + fdt32_t size_dt_strings; /* size of the strings block */ + + /* version 17 fields below */ + fdt32_t size_dt_struct; /* size of the structure block */ +}; + +struct fdt_reserve_entry { + fdt64_t address; + fdt64_t size; +}; + +struct fdt_node_header { + fdt32_t tag; + char name[0]; +}; + +struct fdt_property { + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; + char data[0]; +}; + +#endif /* !__ASSEMBLY */ + +#define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */ +#define FDT_TAGSIZE sizeof(fdt32_t) + +#define FDT_BEGIN_NODE 0x1 /* Start node: full name */ +#define FDT_END_NODE 0x2 /* End node */ +#define FDT_PROP 0x3 /* Property: name off, + size, content */ +#define FDT_NOP 0x4 /* nop */ +#define FDT_END 0x9 + +#define FDT_V1_SIZE (7*sizeof(fdt32_t)) +#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(fdt32_t)) +#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(fdt32_t)) +#define FDT_V16_SIZE FDT_V3_SIZE +#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(fdt32_t)) + +#endif /* FDT_H */ diff --git a/vendor/riscv-isa-sim/fdt/fdt.mk.in b/vendor/riscv-isa-sim/fdt/fdt.mk.in new file mode 100644 index 00000000..273375ef --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt.mk.in @@ -0,0 +1,17 @@ +fdt_subproject_deps = \ + +fdt_hdrs = \ + fdt.h \ + libfdt.h \ + libfdt_env.h \ + +fdt_c_srcs = \ + fdt.c \ + fdt_ro.c \ + fdt_wip.c \ + fdt_sw.c \ + fdt_rw.c \ + fdt_strerror.c \ + fdt_empty_tree.c \ + fdt_addresses.c \ + fdt_overlay.c \ diff --git a/vendor/riscv-isa-sim/fdt/fdt_addresses.c b/vendor/riscv-isa-sim/fdt/fdt_addresses.c new file mode 100644 index 00000000..9a82cd0b --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_addresses.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2014 David Gibson + * Copyright (C) 2018 embedded brains GmbH + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +static int fdt_cells(const void *fdt, int nodeoffset, const char *name) +{ + const fdt32_t *c; + uint32_t val; + int len; + + c = fdt_getprop(fdt, nodeoffset, name, &len); + if (!c) + return len; + + if (len != sizeof(*c)) + return -FDT_ERR_BADNCELLS; + + val = fdt32_to_cpu(*c); + if (val > FDT_MAX_NCELLS) + return -FDT_ERR_BADNCELLS; + + return (int)val; +} + +int fdt_address_cells(const void *fdt, int nodeoffset) +{ + int val; + + val = fdt_cells(fdt, nodeoffset, "#address-cells"); + if (val == 0) + return -FDT_ERR_BADNCELLS; + if (val == -FDT_ERR_NOTFOUND) + return 2; + return val; +} + +int fdt_size_cells(const void *fdt, int nodeoffset) +{ + int val; + + val = fdt_cells(fdt, nodeoffset, "#size-cells"); + if (val == -FDT_ERR_NOTFOUND) + return 1; + return val; +} + +/* This function assumes that [address|size]_cells is 1 or 2 */ +int fdt_appendprop_addrrange(void *fdt, int parent, int nodeoffset, + const char *name, uint64_t addr, uint64_t size) +{ + int addr_cells, size_cells, ret; + uint8_t data[sizeof(fdt64_t) * 2], *prop; + + ret = fdt_address_cells(fdt, 
parent); + if (ret < 0) + return ret; + addr_cells = ret; + + ret = fdt_size_cells(fdt, parent); + if (ret < 0) + return ret; + size_cells = ret; + + /* check validity of address */ + prop = data; + if (addr_cells == 1) { + if ((addr > UINT32_MAX) || ((UINT32_MAX + 1 - addr) < size)) + return -FDT_ERR_BADVALUE; + + fdt32_st(prop, (uint32_t)addr); + } else if (addr_cells == 2) { + fdt64_st(prop, addr); + } else { + return -FDT_ERR_BADNCELLS; + } + + /* check validity of size */ + prop += addr_cells * sizeof(fdt32_t); + if (size_cells == 1) { + if (size > UINT32_MAX) + return -FDT_ERR_BADVALUE; + + fdt32_st(prop, (uint32_t)size); + } else if (size_cells == 2) { + fdt64_st(prop, size); + } else { + return -FDT_ERR_BADNCELLS; + } + + return fdt_appendprop(fdt, nodeoffset, name, data, + (addr_cells + size_cells) * sizeof(fdt32_t)); +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_empty_tree.c b/vendor/riscv-isa-sim/fdt/fdt_empty_tree.c new file mode 100644 index 00000000..49d54d44 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_empty_tree.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2012 David Gibson, IBM Corporation. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +int fdt_create_empty_tree(void *buf, int bufsize) +{ + int err; + + err = fdt_create(buf, bufsize); + if (err) + return err; + + err = fdt_finish_reservemap(buf); + if (err) + return err; + + err = fdt_begin_node(buf, ""); + if (err) + return err; + + err = fdt_end_node(buf); + if (err) + return err; + + err = fdt_finish(buf); + if (err) + return err; + + return fdt_open_into(buf, buf, bufsize); +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_overlay.c b/vendor/riscv-isa-sim/fdt/fdt_overlay.c new file mode 100644 index 00000000..be718733 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_overlay.c @@ -0,0 +1,881 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2016 Free Electrons + * Copyright (C) 2016 NextThing Co. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +/** + * overlay_get_target_phandle - retrieves the target phandle of a fragment + * @fdto: pointer to the device tree overlay blob + * @fragment: node offset of the fragment in the overlay + * + * overlay_get_target_phandle() retrieves the target phandle of an + * overlay fragment when that fragment uses a phandle (target + * property) instead of a path (target-path property). 
+ * + * returns: + * the phandle pointed by the target property + * 0, if the phandle was not found + * -1, if the phandle was malformed + */ +static uint32_t overlay_get_target_phandle(const void *fdto, int fragment) +{ + const fdt32_t *val; + int len; + + val = fdt_getprop(fdto, fragment, "target", &len); + if (!val) + return 0; + + if ((len != sizeof(*val)) || (fdt32_to_cpu(*val) == (uint32_t)-1)) + return (uint32_t)-1; + + return fdt32_to_cpu(*val); +} + +/** + * overlay_get_target - retrieves the offset of a fragment's target + * @fdt: Base device tree blob + * @fdto: Device tree overlay blob + * @fragment: node offset of the fragment in the overlay + * @pathp: pointer which receives the path of the target (or NULL) + * + * overlay_get_target() retrieves the target offset in the base + * device tree of a fragment, no matter how the actual targeting is + * done (through a phandle or a path) + * + * returns: + * the targeted node offset in the base device tree + * Negative error code on error + */ +static int overlay_get_target(const void *fdt, const void *fdto, + int fragment, char const **pathp) +{ + uint32_t phandle; + const char *path = NULL; + int path_len = 0, ret; + + /* Try first to do a phandle based lookup */ + phandle = overlay_get_target_phandle(fdto, fragment); + if (phandle == (uint32_t)-1) + return -FDT_ERR_BADPHANDLE; + + /* no phandle, try path */ + if (!phandle) { + /* And then a path based lookup */ + path = fdt_getprop(fdto, fragment, "target-path", &path_len); + if (path) + ret = fdt_path_offset(fdt, path); + else + ret = path_len; + } else + ret = fdt_node_offset_by_phandle(fdt, phandle); + + /* + * If we haven't found either a target or a + * target-path property in a node that contains a + * __overlay__ subnode (we wouldn't be called + * otherwise), consider it a improperly written + * overlay + */ + if (ret < 0 && path_len == -FDT_ERR_NOTFOUND) + ret = -FDT_ERR_BADOVERLAY; + + /* return on error */ + if (ret < 0) + return ret; + + /* return pointer to path (if available) */ + if (pathp) + *pathp = path ? path : NULL; + + return ret; +} + +/** + * overlay_phandle_add_offset - Increases a phandle by an offset + * @fdt: Base device tree blob + * @node: Device tree overlay blob + * @name: Name of the property to modify (phandle or linux,phandle) + * @delta: offset to apply + * + * overlay_phandle_add_offset() increments a node phandle by a given + * offset. + * + * returns: + * 0 on success. + * Negative error code on error + */ +static int overlay_phandle_add_offset(void *fdt, int node, + const char *name, uint32_t delta) +{ + const fdt32_t *val; + uint32_t adj_val; + int len; + + val = fdt_getprop(fdt, node, name, &len); + if (!val) + return len; + + if (len != sizeof(*val)) + return -FDT_ERR_BADPHANDLE; + + adj_val = fdt32_to_cpu(*val); + if ((adj_val + delta) < adj_val) + return -FDT_ERR_NOPHANDLES; + + adj_val += delta; + if (adj_val == (uint32_t)-1) + return -FDT_ERR_NOPHANDLES; + + return fdt_setprop_inplace_u32(fdt, node, name, adj_val); +} + +/** + * overlay_adjust_node_phandles - Offsets the phandles of a node + * @fdto: Device tree overlay blob + * @node: Offset of the node we want to adjust + * @delta: Offset to shift the phandles of + * + * overlay_adjust_node_phandles() adds a constant to all the phandles + * of a given node. This is mainly use as part of the overlay + * application process, when we want to update all the overlay + * phandles to not conflict with the overlays of the base device tree. 
+ * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_adjust_node_phandles(void *fdto, int node, + uint32_t delta) +{ + int child; + int ret; + + ret = overlay_phandle_add_offset(fdto, node, "phandle", delta); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + + ret = overlay_phandle_add_offset(fdto, node, "linux,phandle", delta); + if (ret && ret != -FDT_ERR_NOTFOUND) + return ret; + + fdt_for_each_subnode(child, fdto, node) { + ret = overlay_adjust_node_phandles(fdto, child, delta); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_adjust_local_phandles - Adjust the phandles of a whole overlay + * @fdto: Device tree overlay blob + * @delta: Offset to shift the phandles of + * + * overlay_adjust_local_phandles() adds a constant to all the + * phandles of an overlay. This is mainly use as part of the overlay + * application process, when we want to update all the overlay + * phandles to not conflict with the overlays of the base device tree. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_adjust_local_phandles(void *fdto, uint32_t delta) +{ + /* + * Start adjusting the phandles from the overlay root + */ + return overlay_adjust_node_phandles(fdto, 0, delta); +} + +/** + * overlay_update_local_node_references - Adjust the overlay references + * @fdto: Device tree overlay blob + * @tree_node: Node offset of the node to operate on + * @fixup_node: Node offset of the matching local fixups node + * @delta: Offset to shift the phandles of + * + * overlay_update_local_nodes_references() update the phandles + * pointing to a node within the device tree overlay by adding a + * constant delta. + * + * This is mainly used as part of a device tree application process, + * where you want the device tree overlays phandles to not conflict + * with the ones from the base device tree before merging them. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_update_local_node_references(void *fdto, + int tree_node, + int fixup_node, + uint32_t delta) +{ + int fixup_prop; + int fixup_child; + int ret; + + fdt_for_each_property_offset(fixup_prop, fdto, fixup_node) { + const fdt32_t *fixup_val; + const char *tree_val; + const char *name; + int fixup_len; + int tree_len; + int i; + + fixup_val = fdt_getprop_by_offset(fdto, fixup_prop, + &name, &fixup_len); + if (!fixup_val) + return fixup_len; + + if (fixup_len % sizeof(uint32_t)) + return -FDT_ERR_BADOVERLAY; + + tree_val = fdt_getprop(fdto, tree_node, name, &tree_len); + if (!tree_val) { + if (tree_len == -FDT_ERR_NOTFOUND) + return -FDT_ERR_BADOVERLAY; + + return tree_len; + } + + for (i = 0; i < (fixup_len / sizeof(uint32_t)); i++) { + fdt32_t adj_val; + uint32_t poffset; + + poffset = fdt32_to_cpu(fixup_val[i]); + + /* + * phandles to fixup can be unaligned. + * + * Use a memcpy for the architectures that do + * not support unaligned accesses. 
+ */ + memcpy(&adj_val, tree_val + poffset, sizeof(adj_val)); + + adj_val = cpu_to_fdt32(fdt32_to_cpu(adj_val) + delta); + + ret = fdt_setprop_inplace_namelen_partial(fdto, + tree_node, + name, + strlen(name), + poffset, + &adj_val, + sizeof(adj_val)); + if (ret == -FDT_ERR_NOSPACE) + return -FDT_ERR_BADOVERLAY; + + if (ret) + return ret; + } + } + + fdt_for_each_subnode(fixup_child, fdto, fixup_node) { + const char *fixup_child_name = fdt_get_name(fdto, fixup_child, + NULL); + int tree_child; + + tree_child = fdt_subnode_offset(fdto, tree_node, + fixup_child_name); + if (tree_child == -FDT_ERR_NOTFOUND) + return -FDT_ERR_BADOVERLAY; + if (tree_child < 0) + return tree_child; + + ret = overlay_update_local_node_references(fdto, + tree_child, + fixup_child, + delta); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_update_local_references - Adjust the overlay references + * @fdto: Device tree overlay blob + * @delta: Offset to shift the phandles of + * + * overlay_update_local_references() update all the phandles pointing + * to a node within the device tree overlay by adding a constant + * delta to not conflict with the base overlay. + * + * This is mainly used as part of a device tree application process, + * where you want the device tree overlays phandles to not conflict + * with the ones from the base device tree before merging them. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_update_local_references(void *fdto, uint32_t delta) +{ + int fixups; + + fixups = fdt_path_offset(fdto, "/__local_fixups__"); + if (fixups < 0) { + /* There's no local phandles to adjust, bail out */ + if (fixups == -FDT_ERR_NOTFOUND) + return 0; + + return fixups; + } + + /* + * Update our local references from the root of the tree + */ + return overlay_update_local_node_references(fdto, 0, fixups, + delta); +} + +/** + * overlay_fixup_one_phandle - Set an overlay phandle to the base one + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * @symbols_off: Node offset of the symbols node in the base device tree + * @path: Path to a node holding a phandle in the overlay + * @path_len: number of path characters to consider + * @name: Name of the property holding the phandle reference in the overlay + * @name_len: number of name characters to consider + * @poffset: Offset within the overlay property where the phandle is stored + * @label: Label of the node referenced by the phandle + * + * overlay_fixup_one_phandle() resolves an overlay phandle pointing to + * a node in the base device tree. + * + * This is part of the device tree overlay application process, when + * you want all the phandles in the overlay to point to the actual + * base dt nodes. 
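+ *
+ * For example (illustrative values): if @label is "led0" and the base
+ * tree's /__symbols__ node contains led0 = "/soc/gpio@1000", the
+ * phandle of /soc/gpio@1000 is looked up and written at byte @poffset
+ * of property @name in the overlay node found at @path.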
+ * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_fixup_one_phandle(void *fdt, void *fdto, + int symbols_off, + const char *path, uint32_t path_len, + const char *name, uint32_t name_len, + int poffset, const char *label) +{ + const char *symbol_path; + uint32_t phandle; + fdt32_t phandle_prop; + int symbol_off, fixup_off; + int prop_len; + + if (symbols_off < 0) + return symbols_off; + + symbol_path = fdt_getprop(fdt, symbols_off, label, + &prop_len); + if (!symbol_path) + return prop_len; + + symbol_off = fdt_path_offset(fdt, symbol_path); + if (symbol_off < 0) + return symbol_off; + + phandle = fdt_get_phandle(fdt, symbol_off); + if (!phandle) + return -FDT_ERR_NOTFOUND; + + fixup_off = fdt_path_offset_namelen(fdto, path, path_len); + if (fixup_off == -FDT_ERR_NOTFOUND) + return -FDT_ERR_BADOVERLAY; + if (fixup_off < 0) + return fixup_off; + + phandle_prop = cpu_to_fdt32(phandle); + return fdt_setprop_inplace_namelen_partial(fdto, fixup_off, + name, name_len, poffset, + &phandle_prop, + sizeof(phandle_prop)); +}; + +/** + * overlay_fixup_phandle - Set an overlay phandle to the base one + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * @symbols_off: Node offset of the symbols node in the base device tree + * @property: Property offset in the overlay holding the list of fixups + * + * overlay_fixup_phandle() resolves all the overlay phandles pointed + * to in a __fixups__ property, and updates them to match the phandles + * in use in the base device tree. + * + * This is part of the device tree overlay application process, when + * you want all the phandles in the overlay to point to the actual + * base dt nodes. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_fixup_phandle(void *fdt, void *fdto, int symbols_off, + int property) +{ + const char *value; + const char *label; + int len; + + value = fdt_getprop_by_offset(fdto, property, + &label, &len); + if (!value) { + if (len == -FDT_ERR_NOTFOUND) + return -FDT_ERR_INTERNAL; + + return len; + } + + do { + const char *path, *name, *fixup_end; + const char *fixup_str = value; + uint32_t path_len, name_len; + uint32_t fixup_len; + char *sep, *endptr; + int poffset, ret; + + fixup_end = memchr(value, '\0', len); + if (!fixup_end) + return -FDT_ERR_BADOVERLAY; + fixup_len = fixup_end - fixup_str; + + len -= fixup_len + 1; + value += fixup_len + 1; + + path = fixup_str; + sep = memchr(fixup_str, ':', fixup_len); + if (!sep || *sep != ':') + return -FDT_ERR_BADOVERLAY; + + path_len = sep - path; + if (path_len == (fixup_len - 1)) + return -FDT_ERR_BADOVERLAY; + + fixup_len -= path_len + 1; + name = sep + 1; + sep = memchr(name, ':', fixup_len); + if (!sep || *sep != ':') + return -FDT_ERR_BADOVERLAY; + + name_len = sep - name; + if (!name_len) + return -FDT_ERR_BADOVERLAY; + + poffset = strtoul(sep + 1, &endptr, 10); + if ((*endptr != '\0') || (endptr <= (sep + 1))) + return -FDT_ERR_BADOVERLAY; + + ret = overlay_fixup_one_phandle(fdt, fdto, symbols_off, + path, path_len, name, name_len, + poffset, label); + if (ret) + return ret; + } while (len > 0); + + return 0; +} + +/** + * overlay_fixup_phandles - Resolve the overlay phandles to the base + * device tree + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * + * overlay_fixup_phandles() resolves all the overlay phandles pointing + * to nodes in the base device tree. 
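+ *
+ * Each property under /__fixups__ is named after a label the overlay
+ * references; its value is one or more NUL-separated strings of the
+ * form "path:property:offset" identifying where the base tree's
+ * phandle must be patched in. An illustrative entry:
+ *
+ *   __fixups__ {
+ *       led0 = "/fragment@0/__overlay__/leds:gpios:4";
+ *   };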
+ * + * This is one of the steps of the device tree overlay application + * process, when you want all the phandles in the overlay to point to + * the actual base dt nodes. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_fixup_phandles(void *fdt, void *fdto) +{ + int fixups_off, symbols_off; + int property; + + /* We can have overlays without any fixups */ + fixups_off = fdt_path_offset(fdto, "/__fixups__"); + if (fixups_off == -FDT_ERR_NOTFOUND) + return 0; /* nothing to do */ + if (fixups_off < 0) + return fixups_off; + + /* And base DTs without symbols */ + symbols_off = fdt_path_offset(fdt, "/__symbols__"); + if ((symbols_off < 0 && (symbols_off != -FDT_ERR_NOTFOUND))) + return symbols_off; + + fdt_for_each_property_offset(property, fdto, fixups_off) { + int ret; + + ret = overlay_fixup_phandle(fdt, fdto, symbols_off, property); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_apply_node - Merges a node into the base device tree + * @fdt: Base Device Tree blob + * @target: Node offset in the base device tree to apply the fragment to + * @fdto: Device tree overlay blob + * @node: Node offset in the overlay holding the changes to merge + * + * overlay_apply_node() merges a node into a target base device tree + * node pointed. + * + * This is part of the final step in the device tree overlay + * application process, when all the phandles have been adjusted and + * resolved and you just have to merge overlay into the base device + * tree. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_apply_node(void *fdt, int target, + void *fdto, int node) +{ + int property; + int subnode; + + fdt_for_each_property_offset(property, fdto, node) { + const char *name; + const void *prop; + int prop_len; + int ret; + + prop = fdt_getprop_by_offset(fdto, property, &name, + &prop_len); + if (prop_len == -FDT_ERR_NOTFOUND) + return -FDT_ERR_INTERNAL; + if (prop_len < 0) + return prop_len; + + ret = fdt_setprop(fdt, target, name, prop, prop_len); + if (ret) + return ret; + } + + fdt_for_each_subnode(subnode, fdto, node) { + const char *name = fdt_get_name(fdto, subnode, NULL); + int nnode; + int ret; + + nnode = fdt_add_subnode(fdt, target, name); + if (nnode == -FDT_ERR_EXISTS) { + nnode = fdt_subnode_offset(fdt, target, name); + if (nnode == -FDT_ERR_NOTFOUND) + return -FDT_ERR_INTERNAL; + } + + if (nnode < 0) + return nnode; + + ret = overlay_apply_node(fdt, nnode, fdto, subnode); + if (ret) + return ret; + } + + return 0; +} + +/** + * overlay_merge - Merge an overlay into its base device tree + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * + * overlay_merge() merges an overlay into its base device tree. + * + * This is the next to last step in the device tree overlay application + * process, when all the phandles have been adjusted and resolved and + * you just have to merge overlay into the base device tree. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_merge(void *fdt, void *fdto) +{ + int fragment; + + fdt_for_each_subnode(fragment, fdto, 0) { + int overlay; + int target; + int ret; + + /* + * Each fragments will have an __overlay__ node. 
If + * they don't, it's not supposed to be merged + */ + overlay = fdt_subnode_offset(fdto, fragment, "__overlay__"); + if (overlay == -FDT_ERR_NOTFOUND) + continue; + + if (overlay < 0) + return overlay; + + target = overlay_get_target(fdt, fdto, fragment, NULL); + if (target < 0) + return target; + + ret = overlay_apply_node(fdt, target, fdto, overlay); + if (ret) + return ret; + } + + return 0; +} + +static int get_path_len(const void *fdt, int nodeoffset) +{ + int len = 0, namelen; + const char *name; + + FDT_RO_PROBE(fdt); + + for (;;) { + name = fdt_get_name(fdt, nodeoffset, &namelen); + if (!name) + return namelen; + + /* root? we're done */ + if (namelen == 0) + break; + + nodeoffset = fdt_parent_offset(fdt, nodeoffset); + if (nodeoffset < 0) + return nodeoffset; + len += namelen + 1; + } + + /* in case of root pretend it's "/" */ + if (len == 0) + len++; + return len; +} + +/** + * overlay_symbol_update - Update the symbols of base tree after a merge + * @fdt: Base Device Tree blob + * @fdto: Device tree overlay blob + * + * overlay_symbol_update() updates the symbols of the base tree with the + * symbols of the applied overlay + * + * This is the last step in the device tree overlay application + * process, allowing the reference of overlay symbols by subsequent + * overlay operations. + * + * returns: + * 0 on success + * Negative error code on failure + */ +static int overlay_symbol_update(void *fdt, void *fdto) +{ + int root_sym, ov_sym, prop, path_len, fragment, target; + int len, frag_name_len, ret, rel_path_len; + const char *s, *e; + const char *path; + const char *name; + const char *frag_name; + const char *rel_path; + const char *target_path; + char *buf; + void *p; + + ov_sym = fdt_subnode_offset(fdto, 0, "__symbols__"); + + /* if no overlay symbols exist no problem */ + if (ov_sym < 0) + return 0; + + root_sym = fdt_subnode_offset(fdt, 0, "__symbols__"); + + /* it no root symbols exist we should create them */ + if (root_sym == -FDT_ERR_NOTFOUND) + root_sym = fdt_add_subnode(fdt, 0, "__symbols__"); + + /* any error is fatal now */ + if (root_sym < 0) + return root_sym; + + /* iterate over each overlay symbol */ + fdt_for_each_property_offset(prop, fdto, ov_sym) { + path = fdt_getprop_by_offset(fdto, prop, &name, &path_len); + if (!path) + return path_len; + + /* verify it's a string property (terminated by a single \0) */ + if (path_len < 1 || memchr(path, '\0', path_len) != &path[path_len - 1]) + return -FDT_ERR_BADVALUE; + + /* keep end marker to avoid strlen() */ + e = path + path_len; + + if (*path != '/') + return -FDT_ERR_BADVALUE; + + /* get fragment name first */ + s = strchr(path + 1, '/'); + if (!s) { + /* Symbol refers to something that won't end + * up in the target tree */ + continue; + } + + frag_name = path + 1; + frag_name_len = s - path - 1; + + /* verify format; safe since "s" lies in \0 terminated prop */ + len = sizeof("/__overlay__/") - 1; + if ((e - s) > len && (memcmp(s, "/__overlay__/", len) == 0)) { + /* //__overlay__/ */ + rel_path = s + len; + rel_path_len = e - rel_path; + } else if ((e - s) == len + && (memcmp(s, "/__overlay__", len - 1) == 0)) { + /* //__overlay__ */ + rel_path = ""; + rel_path_len = 0; + } else { + /* Symbol refers to something that won't end + * up in the target tree */ + continue; + } + + /* find the fragment index in which the symbol lies */ + ret = fdt_subnode_offset_namelen(fdto, 0, frag_name, + frag_name_len); + /* not found? 
*/ + if (ret < 0) + return -FDT_ERR_BADOVERLAY; + fragment = ret; + + /* an __overlay__ subnode must exist */ + ret = fdt_subnode_offset(fdto, fragment, "__overlay__"); + if (ret < 0) + return -FDT_ERR_BADOVERLAY; + + /* get the target of the fragment */ + ret = overlay_get_target(fdt, fdto, fragment, &target_path); + if (ret < 0) + return ret; + target = ret; + + /* if we have a target path use */ + if (!target_path) { + ret = get_path_len(fdt, target); + if (ret < 0) + return ret; + len = ret; + } else { + len = strlen(target_path); + } + + ret = fdt_setprop_placeholder(fdt, root_sym, name, + len + (len > 1) + rel_path_len + 1, &p); + if (ret < 0) + return ret; + + if (!target_path) { + /* again in case setprop_placeholder changed it */ + ret = overlay_get_target(fdt, fdto, fragment, &target_path); + if (ret < 0) + return ret; + target = ret; + } + + buf = p; + if (len > 1) { /* target is not root */ + if (!target_path) { + ret = fdt_get_path(fdt, target, buf, len + 1); + if (ret < 0) + return ret; + } else + memcpy(buf, target_path, len + 1); + + } else + len--; + + buf[len] = '/'; + memcpy(buf + len + 1, rel_path, rel_path_len); + buf[len + 1 + rel_path_len] = '\0'; + } + + return 0; +} + +int fdt_overlay_apply(void *fdt, void *fdto) +{ + uint32_t delta; + int ret; + + FDT_RO_PROBE(fdt); + FDT_RO_PROBE(fdto); + + ret = fdt_find_max_phandle(fdt, &delta); + if (ret) + goto err; + + ret = overlay_adjust_local_phandles(fdto, delta); + if (ret) + goto err; + + ret = overlay_update_local_references(fdto, delta); + if (ret) + goto err; + + ret = overlay_fixup_phandles(fdt, fdto); + if (ret) + goto err; + + ret = overlay_merge(fdt, fdto); + if (ret) + goto err; + + ret = overlay_symbol_update(fdt, fdto); + if (ret) + goto err; + + /* + * The overlay has been damaged, erase its magic. + */ + fdt_set_magic(fdto, ~0); + + return 0; + +err: + /* + * The overlay might have been damaged, erase its magic. + */ + fdt_set_magic(fdto, ~0); + + /* + * The base device tree might have been damaged, erase its + * magic. + */ + fdt_set_magic(fdt, ~0); + + return ret; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_ro.c b/vendor/riscv-isa-sim/fdt/fdt_ro.c new file mode 100644 index 00000000..a5c2797c --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_ro.c @@ -0,0 +1,898 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +static int fdt_nodename_eq_(const void *fdt, int offset, + const char *s, int len) +{ + int olen; + const char *p = fdt_get_name(fdt, offset, &olen); + + if (!p || olen < len) + /* short match */ + return 0; + + if (memcmp(p, s, len) != 0) + return 0; + + if (p[len] == '\0') + return 1; + else if (!memchr(s, '@', len) && (p[len] == '@')) + return 1; + else + return 0; +} + +const char *fdt_get_string(const void *fdt, int stroffset, int *lenp) +{ + int32_t totalsize = fdt_ro_probe_(fdt); + uint32_t absoffset = stroffset + fdt_off_dt_strings(fdt); + size_t len; + int err; + const char *s, *n; + + err = totalsize; + if (totalsize < 0) + goto fail; + + err = -FDT_ERR_BADOFFSET; + if (absoffset >= totalsize) + goto fail; + len = totalsize - absoffset; + + if (fdt_magic(fdt) == FDT_MAGIC) { + if (stroffset < 0) + goto fail; + if (fdt_version(fdt) >= 17) { + if (stroffset >= fdt_size_dt_strings(fdt)) + goto fail; + if ((fdt_size_dt_strings(fdt) - stroffset) < len) + len = fdt_size_dt_strings(fdt) - stroffset; + } + } else if (fdt_magic(fdt) == FDT_SW_MAGIC) { + if ((stroffset >= 0) + || (stroffset < -fdt_size_dt_strings(fdt))) + goto fail; + if ((-stroffset) < len) + len = -stroffset; + } else { + err = -FDT_ERR_INTERNAL; + goto fail; + } + + s = (const char *)fdt + absoffset; + n = memchr(s, '\0', len); + if (!n) { + /* missing terminating NULL */ + err = -FDT_ERR_TRUNCATED; + goto fail; + } + + if (lenp) + *lenp = n - s; + return s; + +fail: + if (lenp) + *lenp = err; + return NULL; +} + +const char *fdt_string(const void *fdt, int stroffset) +{ + return fdt_get_string(fdt, stroffset, NULL); +} + +static int fdt_string_eq_(const void *fdt, int stroffset, + const char *s, int len) +{ + int slen; + const char *p = fdt_get_string(fdt, stroffset, &slen); + + return p && (slen == len) && (memcmp(p, s, len) == 0); +} + +int fdt_find_max_phandle(const void *fdt, uint32_t *phandle) +{ + uint32_t max = 0; + int offset = -1; + + while (true) { + uint32_t value; + + offset = fdt_next_node(fdt, offset, NULL); + if (offset < 0) { + if (offset == -FDT_ERR_NOTFOUND) + break; + + return offset; + } + + value = fdt_get_phandle(fdt, offset); + + if (value > max) + max = value; + } + + if (phandle) + *phandle = max; + + return 0; +} + +int fdt_generate_phandle(const void *fdt, uint32_t *phandle) +{ + uint32_t max; + int err; + + err = fdt_find_max_phandle(fdt, &max); + if (err < 0) + return err; + + if (max == FDT_MAX_PHANDLE) + return -FDT_ERR_NOPHANDLES; + + if (phandle) + *phandle = max + 1; + + return 0; +} + +static const struct fdt_reserve_entry *fdt_mem_rsv(const void *fdt, int n) +{ + int offset = n * sizeof(struct fdt_reserve_entry); + int absoffset = fdt_off_mem_rsvmap(fdt) + offset; + + if (absoffset < fdt_off_mem_rsvmap(fdt)) + return NULL; + if (absoffset > fdt_totalsize(fdt) - sizeof(struct fdt_reserve_entry)) + return NULL; + return fdt_mem_rsv_(fdt, n); +} + +int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size) +{ + const struct fdt_reserve_entry *re; + + FDT_RO_PROBE(fdt); + re = fdt_mem_rsv(fdt, n); + if (!re) + return -FDT_ERR_BADOFFSET; + + *address = fdt64_ld(&re->address); + *size = fdt64_ld(&re->size); + return 0; +} + +int fdt_num_mem_rsv(const void *fdt) +{ + int i; + const struct fdt_reserve_entry *re; + + for (i = 0; (re = fdt_mem_rsv(fdt, i)) != NULL; i++) { + if (fdt64_ld(&re->size) == 0) + return i; + } + return -FDT_ERR_TRUNCATED; +} + +static int nextprop_(const void 
*fdt, int offset) +{ + uint32_t tag; + int nextoffset; + + do { + tag = fdt_next_tag(fdt, offset, &nextoffset); + + switch (tag) { + case FDT_END: + if (nextoffset >= 0) + return -FDT_ERR_BADSTRUCTURE; + else + return nextoffset; + + case FDT_PROP: + return offset; + } + offset = nextoffset; + } while (tag == FDT_NOP); + + return -FDT_ERR_NOTFOUND; +} + +int fdt_subnode_offset_namelen(const void *fdt, int offset, + const char *name, int namelen) +{ + int depth; + + FDT_RO_PROBE(fdt); + + for (depth = 0; + (offset >= 0) && (depth >= 0); + offset = fdt_next_node(fdt, offset, &depth)) + if ((depth == 1) + && fdt_nodename_eq_(fdt, offset, name, namelen)) + return offset; + + if (depth < 0) + return -FDT_ERR_NOTFOUND; + return offset; /* error */ +} + +int fdt_subnode_offset(const void *fdt, int parentoffset, + const char *name) +{ + return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name)); +} + +int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen) +{ + const char *end = path + namelen; + const char *p = path; + int offset = 0; + + FDT_RO_PROBE(fdt); + + /* see if we have an alias */ + if (*path != '/') { + const char *q = memchr(path, '/', end - p); + + if (!q) + q = end; + + p = fdt_get_alias_namelen(fdt, p, q - p); + if (!p) + return -FDT_ERR_BADPATH; + offset = fdt_path_offset(fdt, p); + + p = q; + } + + while (p < end) { + const char *q; + + while (*p == '/') { + p++; + if (p == end) + return offset; + } + q = memchr(p, '/', end - p); + if (! q) + q = end; + + offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p); + if (offset < 0) + return offset; + + p = q; + } + + return offset; +} + +int fdt_path_offset(const void *fdt, const char *path) +{ + return fdt_path_offset_namelen(fdt, path, strlen(path)); +} + +const char *fdt_get_name(const void *fdt, int nodeoffset, int *len) +{ + const struct fdt_node_header *nh = fdt_offset_ptr_(fdt, nodeoffset); + const char *nameptr; + int err; + + if (((err = fdt_ro_probe_(fdt)) < 0) + || ((err = fdt_check_node_offset_(fdt, nodeoffset)) < 0)) + goto fail; + + nameptr = nh->name; + + if (fdt_version(fdt) < 0x10) { + /* + * For old FDT versions, match the naming conventions of V16: + * give only the leaf name (after all /). The actual tree + * contents are loosely checked. + */ + const char *leaf; + leaf = strrchr(nameptr, '/'); + if (leaf == NULL) { + err = -FDT_ERR_BADSTRUCTURE; + goto fail; + } + nameptr = leaf+1; + } + + if (len) + *len = strlen(nameptr); + + return nameptr; + + fail: + if (len) + *len = err; + return NULL; +} + +int fdt_first_property_offset(const void *fdt, int nodeoffset) +{ + int offset; + + if ((offset = fdt_check_node_offset_(fdt, nodeoffset)) < 0) + return offset; + + return nextprop_(fdt, offset); +} + +int fdt_next_property_offset(const void *fdt, int offset) +{ + if ((offset = fdt_check_prop_offset_(fdt, offset)) < 0) + return offset; + + return nextprop_(fdt, offset); +} + +static const struct fdt_property *fdt_get_property_by_offset_(const void *fdt, + int offset, + int *lenp) +{ + int err; + const struct fdt_property *prop; + + if ((err = fdt_check_prop_offset_(fdt, offset)) < 0) { + if (lenp) + *lenp = err; + return NULL; + } + + prop = fdt_offset_ptr_(fdt, offset); + + if (lenp) + *lenp = fdt32_ld(&prop->len); + + return prop; +} + +const struct fdt_property *fdt_get_property_by_offset(const void *fdt, + int offset, + int *lenp) +{ + /* Prior to version 16, properties may need realignment + * and this API does not work. fdt_getprop_*() will, however. 
*/ + + if (fdt_version(fdt) < 0x10) { + if (lenp) + *lenp = -FDT_ERR_BADVERSION; + return NULL; + } + + return fdt_get_property_by_offset_(fdt, offset, lenp); +} + +static const struct fdt_property *fdt_get_property_namelen_(const void *fdt, + int offset, + const char *name, + int namelen, + int *lenp, + int *poffset) +{ + for (offset = fdt_first_property_offset(fdt, offset); + (offset >= 0); + (offset = fdt_next_property_offset(fdt, offset))) { + const struct fdt_property *prop; + + if (!(prop = fdt_get_property_by_offset_(fdt, offset, lenp))) { + offset = -FDT_ERR_INTERNAL; + break; + } + if (fdt_string_eq_(fdt, fdt32_ld(&prop->nameoff), + name, namelen)) { + if (poffset) + *poffset = offset; + return prop; + } + } + + if (lenp) + *lenp = offset; + return NULL; +} + + +const struct fdt_property *fdt_get_property_namelen(const void *fdt, + int offset, + const char *name, + int namelen, int *lenp) +{ + /* Prior to version 16, properties may need realignment + * and this API does not work. fdt_getprop_*() will, however. */ + if (fdt_version(fdt) < 0x10) { + if (lenp) + *lenp = -FDT_ERR_BADVERSION; + return NULL; + } + + return fdt_get_property_namelen_(fdt, offset, name, namelen, lenp, + NULL); +} + + +const struct fdt_property *fdt_get_property(const void *fdt, + int nodeoffset, + const char *name, int *lenp) +{ + return fdt_get_property_namelen(fdt, nodeoffset, name, + strlen(name), lenp); +} + +const void *fdt_getprop_namelen(const void *fdt, int nodeoffset, + const char *name, int namelen, int *lenp) +{ + int poffset; + const struct fdt_property *prop; + + prop = fdt_get_property_namelen_(fdt, nodeoffset, name, namelen, lenp, + &poffset); + if (!prop) + return NULL; + + /* Handle realignment */ + if (fdt_version(fdt) < 0x10 && (poffset + sizeof(*prop)) % 8 && + fdt32_ld(&prop->len) >= 8) + return prop->data + 4; + return prop->data; +} + +const void *fdt_getprop_by_offset(const void *fdt, int offset, + const char **namep, int *lenp) +{ + const struct fdt_property *prop; + + prop = fdt_get_property_by_offset_(fdt, offset, lenp); + if (!prop) + return NULL; + if (namep) { + const char *name; + int namelen; + name = fdt_get_string(fdt, fdt32_ld(&prop->nameoff), + &namelen); + if (!name) { + if (lenp) + *lenp = namelen; + return NULL; + } + *namep = name; + } + + /* Handle realignment */ + if (fdt_version(fdt) < 0x10 && (offset + sizeof(*prop)) % 8 && + fdt32_ld(&prop->len) >= 8) + return prop->data + 4; + return prop->data; +} + +const void *fdt_getprop(const void *fdt, int nodeoffset, + const char *name, int *lenp) +{ + return fdt_getprop_namelen(fdt, nodeoffset, name, strlen(name), lenp); +} + +uint32_t fdt_get_phandle(const void *fdt, int nodeoffset) +{ + const fdt32_t *php; + int len; + + /* FIXME: This is a bit sub-optimal, since we potentially scan + * over all the properties twice. 
*/ + php = fdt_getprop(fdt, nodeoffset, "phandle", &len); + if (!php || (len != sizeof(*php))) { + php = fdt_getprop(fdt, nodeoffset, "linux,phandle", &len); + if (!php || (len != sizeof(*php))) + return 0; + } + + return fdt32_ld(php); +} + +const char *fdt_get_alias_namelen(const void *fdt, + const char *name, int namelen) +{ + int aliasoffset; + + aliasoffset = fdt_path_offset(fdt, "/aliases"); + if (aliasoffset < 0) + return NULL; + + return fdt_getprop_namelen(fdt, aliasoffset, name, namelen, NULL); +} + +const char *fdt_get_alias(const void *fdt, const char *name) +{ + return fdt_get_alias_namelen(fdt, name, strlen(name)); +} + +int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen) +{ + int pdepth = 0, p = 0; + int offset, depth, namelen; + const char *name; + + FDT_RO_PROBE(fdt); + + if (buflen < 2) + return -FDT_ERR_NOSPACE; + + for (offset = 0, depth = 0; + (offset >= 0) && (offset <= nodeoffset); + offset = fdt_next_node(fdt, offset, &depth)) { + while (pdepth > depth) { + do { + p--; + } while (buf[p-1] != '/'); + pdepth--; + } + + if (pdepth >= depth) { + name = fdt_get_name(fdt, offset, &namelen); + if (!name) + return namelen; + if ((p + namelen + 1) <= buflen) { + memcpy(buf + p, name, namelen); + p += namelen; + buf[p++] = '/'; + pdepth++; + } + } + + if (offset == nodeoffset) { + if (pdepth < (depth + 1)) + return -FDT_ERR_NOSPACE; + + if (p > 1) /* special case so that root path is "/", not "" */ + p--; + buf[p] = '\0'; + return 0; + } + } + + if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0)) + return -FDT_ERR_BADOFFSET; + else if (offset == -FDT_ERR_BADOFFSET) + return -FDT_ERR_BADSTRUCTURE; + + return offset; /* error from fdt_next_node() */ +} + +int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset, + int supernodedepth, int *nodedepth) +{ + int offset, depth; + int supernodeoffset = -FDT_ERR_INTERNAL; + + FDT_RO_PROBE(fdt); + + if (supernodedepth < 0) + return -FDT_ERR_NOTFOUND; + + for (offset = 0, depth = 0; + (offset >= 0) && (offset <= nodeoffset); + offset = fdt_next_node(fdt, offset, &depth)) { + if (depth == supernodedepth) + supernodeoffset = offset; + + if (offset == nodeoffset) { + if (nodedepth) + *nodedepth = depth; + + if (supernodedepth > depth) + return -FDT_ERR_NOTFOUND; + else + return supernodeoffset; + } + } + + if ((offset == -FDT_ERR_NOTFOUND) || (offset >= 0)) + return -FDT_ERR_BADOFFSET; + else if (offset == -FDT_ERR_BADOFFSET) + return -FDT_ERR_BADSTRUCTURE; + + return offset; /* error from fdt_next_node() */ +} + +int fdt_node_depth(const void *fdt, int nodeoffset) +{ + int nodedepth; + int err; + + err = fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, &nodedepth); + if (err) + return (err < 0) ? err : -FDT_ERR_INTERNAL; + return nodedepth; +} + +int fdt_parent_offset(const void *fdt, int nodeoffset) +{ + int nodedepth = fdt_node_depth(fdt, nodeoffset); + + if (nodedepth < 0) + return nodedepth; + return fdt_supernode_atdepth_offset(fdt, nodeoffset, + nodedepth - 1, NULL); +} + +int fdt_node_offset_by_prop_value(const void *fdt, int startoffset, + const char *propname, + const void *propval, int proplen) +{ + int offset; + const void *val; + int len; + + FDT_RO_PROBE(fdt); + + /* FIXME: The algorithm here is pretty horrible: we scan each + * property of a node in fdt_getprop(), then if that didn't + * find what we want, we scan over them again making our way + * to the next node. Still it's the easiest to implement + * approach; performance can come later. 
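+	 *
+	 * Typical use (illustrative): find the first node whose
+	 * "device_type" property is the string "memory":
+	 *
+	 *   offset = fdt_node_offset_by_prop_value(fdt, -1, "device_type",
+	 *                                          "memory", sizeof("memory"));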
*/ + for (offset = fdt_next_node(fdt, startoffset, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + val = fdt_getprop(fdt, offset, propname, &len); + if (val && (len == proplen) + && (memcmp(val, propval, len) == 0)) + return offset; + } + + return offset; /* error from fdt_next_node() */ +} + +int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle) +{ + int offset; + + if ((phandle == 0) || (phandle == -1)) + return -FDT_ERR_BADPHANDLE; + + FDT_RO_PROBE(fdt); + + /* FIXME: The algorithm here is pretty horrible: we + * potentially scan each property of a node in + * fdt_get_phandle(), then if that didn't find what + * we want, we scan over them again making our way to the next + * node. Still it's the easiest to implement approach; + * performance can come later. */ + for (offset = fdt_next_node(fdt, -1, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + if (fdt_get_phandle(fdt, offset) == phandle) + return offset; + } + + return offset; /* error from fdt_next_node() */ +} + +int fdt_stringlist_contains(const char *strlist, int listlen, const char *str) +{ + int len = strlen(str); + const char *p; + + while (listlen >= len) { + if (memcmp(str, strlist, len+1) == 0) + return 1; + p = memchr(strlist, '\0', listlen); + if (!p) + return 0; /* malformed strlist.. */ + listlen -= (p-strlist) + 1; + strlist = p + 1; + } + return 0; +} + +int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property) +{ + const char *list, *end; + int length, count = 0; + + list = fdt_getprop(fdt, nodeoffset, property, &length); + if (!list) + return length; + + end = list + length; + + while (list < end) { + length = strnlen(list, end - list) + 1; + + /* Abort if the last string isn't properly NUL-terminated. */ + if (list + length > end) + return -FDT_ERR_BADVALUE; + + list += length; + count++; + } + + return count; +} + +int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property, + const char *string) +{ + int length, len, idx = 0; + const char *list, *end; + + list = fdt_getprop(fdt, nodeoffset, property, &length); + if (!list) + return length; + + len = strlen(string) + 1; + end = list + length; + + while (list < end) { + length = strnlen(list, end - list) + 1; + + /* Abort if the last string isn't properly NUL-terminated. */ + if (list + length > end) + return -FDT_ERR_BADVALUE; + + if (length == len && memcmp(list, string, length) == 0) + return idx; + + list += length; + idx++; + } + + return -FDT_ERR_NOTFOUND; +} + +const char *fdt_stringlist_get(const void *fdt, int nodeoffset, + const char *property, int idx, + int *lenp) +{ + const char *list, *end; + int length; + + list = fdt_getprop(fdt, nodeoffset, property, &length); + if (!list) { + if (lenp) + *lenp = length; + + return NULL; + } + + end = list + length; + + while (list < end) { + length = strnlen(list, end - list) + 1; + + /* Abort if the last string isn't properly NUL-terminated. 
*/ + if (list + length > end) { + if (lenp) + *lenp = -FDT_ERR_BADVALUE; + + return NULL; + } + + if (idx == 0) { + if (lenp) + *lenp = length - 1; + + return list; + } + + list += length; + idx--; + } + + if (lenp) + *lenp = -FDT_ERR_NOTFOUND; + + return NULL; +} + +int fdt_node_check_compatible(const void *fdt, int nodeoffset, + const char *compatible) +{ + const void *prop; + int len; + + prop = fdt_getprop(fdt, nodeoffset, "compatible", &len); + if (!prop) + return len; + + return !fdt_stringlist_contains(prop, len, compatible); +} + +int fdt_node_offset_by_compatible(const void *fdt, int startoffset, + const char *compatible) +{ + int offset, err; + + FDT_RO_PROBE(fdt); + + /* FIXME: The algorithm here is pretty horrible: we scan each + * property of a node in fdt_node_check_compatible(), then if + * that didn't find what we want, we scan over them again + * making our way to the next node. Still it's the easiest to + * implement approach; performance can come later. */ + for (offset = fdt_next_node(fdt, startoffset, NULL); + offset >= 0; + offset = fdt_next_node(fdt, offset, NULL)) { + err = fdt_node_check_compatible(fdt, offset, compatible); + if ((err < 0) && (err != -FDT_ERR_NOTFOUND)) + return err; + else if (err == 0) + return offset; + } + + return offset; /* error from fdt_next_node() */ +} + +int fdt_check_full(const void *fdt, size_t bufsize) +{ + int err; + int num_memrsv; + int offset, nextoffset = 0; + uint32_t tag; + unsigned depth = 0; + const void *prop; + const char *propname; + + if (bufsize < FDT_V1_SIZE) + return -FDT_ERR_TRUNCATED; + err = fdt_check_header(fdt); + if (err != 0) + return err; + if (bufsize < fdt_totalsize(fdt)) + return -FDT_ERR_TRUNCATED; + + num_memrsv = fdt_num_mem_rsv(fdt); + if (num_memrsv < 0) + return num_memrsv; + + while (1) { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + + if (nextoffset < 0) + return nextoffset; + + switch (tag) { + case FDT_NOP: + break; + + case FDT_END: + if (depth != 0) + return -FDT_ERR_BADSTRUCTURE; + return 0; + + case FDT_BEGIN_NODE: + depth++; + if (depth > INT_MAX) + return -FDT_ERR_BADSTRUCTURE; + break; + + case FDT_END_NODE: + if (depth == 0) + return -FDT_ERR_BADSTRUCTURE; + depth--; + break; + + case FDT_PROP: + prop = fdt_getprop_by_offset(fdt, offset, &propname, + &err); + if (!prop) + return err; + break; + + default: + return -FDT_ERR_INTERNAL; + } + } +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_rw.c b/vendor/riscv-isa-sim/fdt/fdt_rw.c new file mode 100644 index 00000000..8795947c --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_rw.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +static int fdt_blocks_misordered_(const void *fdt, + int mem_rsv_size, int struct_size) +{ + return (fdt_off_mem_rsvmap(fdt) < FDT_ALIGN(sizeof(struct fdt_header), 8)) + || (fdt_off_dt_struct(fdt) < + (fdt_off_mem_rsvmap(fdt) + mem_rsv_size)) + || (fdt_off_dt_strings(fdt) < + (fdt_off_dt_struct(fdt) + struct_size)) + || (fdt_totalsize(fdt) < + (fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt))); +} + +static int fdt_rw_probe_(void *fdt) +{ + FDT_RO_PROBE(fdt); + + if (fdt_version(fdt) < 17) + return -FDT_ERR_BADVERSION; + if (fdt_blocks_misordered_(fdt, sizeof(struct fdt_reserve_entry), + fdt_size_dt_struct(fdt))) + return -FDT_ERR_BADLAYOUT; + if (fdt_version(fdt) > 17) + fdt_set_version(fdt, 17); + + return 0; +} + +#define FDT_RW_PROBE(fdt) \ + { \ + int err_; \ + if ((err_ = fdt_rw_probe_(fdt)) != 0) \ + return err_; \ + } + +static inline int fdt_data_size_(void *fdt) +{ + return fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); +} + +static int fdt_splice_(void *fdt, void *splicepoint, int oldlen, int newlen) +{ + char *p = splicepoint; + char *end = (char *)fdt + fdt_data_size_(fdt); + + if (((p + oldlen) < p) || ((p + oldlen) > end)) + return -FDT_ERR_BADOFFSET; + if ((p < (char *)fdt) || ((end - oldlen + newlen) < (char *)fdt)) + return -FDT_ERR_BADOFFSET; + if ((end - oldlen + newlen) > ((char *)fdt + fdt_totalsize(fdt))) + return -FDT_ERR_NOSPACE; + memmove(p + newlen, p + oldlen, end - p - oldlen); + return 0; +} + +static int fdt_splice_mem_rsv_(void *fdt, struct fdt_reserve_entry *p, + int oldn, int newn) +{ + int delta = (newn - oldn) * sizeof(*p); + int err; + err = fdt_splice_(fdt, p, oldn * sizeof(*p), newn * sizeof(*p)); + if (err) + return err; + fdt_set_off_dt_struct(fdt, fdt_off_dt_struct(fdt) + delta); + fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); + return 0; +} + +static int fdt_splice_struct_(void *fdt, void *p, + int oldlen, int newlen) +{ + int delta = newlen - oldlen; + int err; + + if ((err = fdt_splice_(fdt, p, oldlen, newlen))) + return err; + + fdt_set_size_dt_struct(fdt, fdt_size_dt_struct(fdt) + delta); + fdt_set_off_dt_strings(fdt, fdt_off_dt_strings(fdt) + delta); + return 0; +} + +/* Must only be used to roll back in case of error */ +static void fdt_del_last_string_(void *fdt, const char *s) +{ + int newlen = strlen(s) + 1; + + fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) - newlen); +} + +static int fdt_splice_string_(void *fdt, int newlen) +{ + void *p = (char *)fdt + + fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt); + int err; + + if ((err = fdt_splice_(fdt, p, 0, newlen))) + return err; + + fdt_set_size_dt_strings(fdt, fdt_size_dt_strings(fdt) + newlen); + return 0; +} + +static int fdt_find_add_string_(void *fdt, const char *s, int *allocated) +{ + char *strtab = (char *)fdt + fdt_off_dt_strings(fdt); + const char *p; + char *new; + int len = strlen(s) + 1; + int err; + + *allocated = 0; + + p = fdt_find_string_(strtab, fdt_size_dt_strings(fdt), s); + if (p) + /* found it */ + return (p - strtab); + + new = strtab + fdt_size_dt_strings(fdt); + err = fdt_splice_string_(fdt, len); + if (err) + return err; + + *allocated = 1; + + memcpy(new, s, len); + return (new - strtab); +} + +int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size) +{ + struct fdt_reserve_entry *re; + int err; + + FDT_RW_PROBE(fdt); + + re = fdt_mem_rsv_w_(fdt, fdt_num_mem_rsv(fdt)); + err = fdt_splice_mem_rsv_(fdt, re, 0, 1); + if (err) + return err; + 
+ re->address = cpu_to_fdt64(address); + re->size = cpu_to_fdt64(size); + return 0; +} + +int fdt_del_mem_rsv(void *fdt, int n) +{ + struct fdt_reserve_entry *re = fdt_mem_rsv_w_(fdt, n); + + FDT_RW_PROBE(fdt); + + if (n >= fdt_num_mem_rsv(fdt)) + return -FDT_ERR_NOTFOUND; + + return fdt_splice_mem_rsv_(fdt, re, 1, 0); +} + +static int fdt_resize_property_(void *fdt, int nodeoffset, const char *name, + int len, struct fdt_property **prop) +{ + int oldlen; + int err; + + *prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); + if (!*prop) + return oldlen; + + if ((err = fdt_splice_struct_(fdt, (*prop)->data, FDT_TAGALIGN(oldlen), + FDT_TAGALIGN(len)))) + return err; + + (*prop)->len = cpu_to_fdt32(len); + return 0; +} + +static int fdt_add_property_(void *fdt, int nodeoffset, const char *name, + int len, struct fdt_property **prop) +{ + int proplen; + int nextoffset; + int namestroff; + int err; + int allocated; + + if ((nextoffset = fdt_check_node_offset_(fdt, nodeoffset)) < 0) + return nextoffset; + + namestroff = fdt_find_add_string_(fdt, name, &allocated); + if (namestroff < 0) + return namestroff; + + *prop = fdt_offset_ptr_w_(fdt, nextoffset); + proplen = sizeof(**prop) + FDT_TAGALIGN(len); + + err = fdt_splice_struct_(fdt, *prop, 0, proplen); + if (err) { + if (allocated) + fdt_del_last_string_(fdt, name); + return err; + } + + (*prop)->tag = cpu_to_fdt32(FDT_PROP); + (*prop)->nameoff = cpu_to_fdt32(namestroff); + (*prop)->len = cpu_to_fdt32(len); + return 0; +} + +int fdt_set_name(void *fdt, int nodeoffset, const char *name) +{ + char *namep; + int oldlen, newlen; + int err; + + FDT_RW_PROBE(fdt); + + namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen); + if (!namep) + return oldlen; + + newlen = strlen(name); + + err = fdt_splice_struct_(fdt, namep, FDT_TAGALIGN(oldlen+1), + FDT_TAGALIGN(newlen+1)); + if (err) + return err; + + memcpy(namep, name, newlen+1); + return 0; +} + +int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name, + int len, void **prop_data) +{ + struct fdt_property *prop; + int err; + + FDT_RW_PROBE(fdt); + + err = fdt_resize_property_(fdt, nodeoffset, name, len, &prop); + if (err == -FDT_ERR_NOTFOUND) + err = fdt_add_property_(fdt, nodeoffset, name, len, &prop); + if (err) + return err; + + *prop_data = prop->data; + return 0; +} + +int fdt_setprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len) +{ + void *prop_data; + int err; + + err = fdt_setprop_placeholder(fdt, nodeoffset, name, len, &prop_data); + if (err) + return err; + + if (len) + memcpy(prop_data, val, len); + return 0; +} + +int fdt_appendprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len) +{ + struct fdt_property *prop; + int err, oldlen, newlen; + + FDT_RW_PROBE(fdt); + + prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen); + if (prop) { + newlen = len + oldlen; + err = fdt_splice_struct_(fdt, prop->data, + FDT_TAGALIGN(oldlen), + FDT_TAGALIGN(newlen)); + if (err) + return err; + prop->len = cpu_to_fdt32(newlen); + memcpy(prop->data + oldlen, val, len); + } else { + err = fdt_add_property_(fdt, nodeoffset, name, len, &prop); + if (err) + return err; + memcpy(prop->data, val, len); + } + return 0; +} + +int fdt_delprop(void *fdt, int nodeoffset, const char *name) +{ + struct fdt_property *prop; + int len, proplen; + + FDT_RW_PROBE(fdt); + + prop = fdt_get_property_w(fdt, nodeoffset, name, &len); + if (!prop) + return len; + + proplen = sizeof(*prop) + FDT_TAGALIGN(len); + return fdt_splice_struct_(fdt, prop, 
proplen, 0); +} + +int fdt_add_subnode_namelen(void *fdt, int parentoffset, + const char *name, int namelen) +{ + struct fdt_node_header *nh; + int offset, nextoffset; + int nodelen; + int err; + uint32_t tag; + fdt32_t *endtag; + + FDT_RW_PROBE(fdt); + + offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen); + if (offset >= 0) + return -FDT_ERR_EXISTS; + else if (offset != -FDT_ERR_NOTFOUND) + return offset; + + /* Try to place the new node after the parent's properties */ + fdt_next_tag(fdt, parentoffset, &nextoffset); /* skip the BEGIN_NODE */ + do { + offset = nextoffset; + tag = fdt_next_tag(fdt, offset, &nextoffset); + } while ((tag == FDT_PROP) || (tag == FDT_NOP)); + + nh = fdt_offset_ptr_w_(fdt, offset); + nodelen = sizeof(*nh) + FDT_TAGALIGN(namelen+1) + FDT_TAGSIZE; + + err = fdt_splice_struct_(fdt, nh, 0, nodelen); + if (err) + return err; + + nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); + memset(nh->name, 0, FDT_TAGALIGN(namelen+1)); + memcpy(nh->name, name, namelen); + endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE); + *endtag = cpu_to_fdt32(FDT_END_NODE); + + return offset; +} + +int fdt_add_subnode(void *fdt, int parentoffset, const char *name) +{ + return fdt_add_subnode_namelen(fdt, parentoffset, name, strlen(name)); +} + +int fdt_del_node(void *fdt, int nodeoffset) +{ + int endoffset; + + FDT_RW_PROBE(fdt); + + endoffset = fdt_node_end_offset_(fdt, nodeoffset); + if (endoffset < 0) + return endoffset; + + return fdt_splice_struct_(fdt, fdt_offset_ptr_w_(fdt, nodeoffset), + endoffset - nodeoffset, 0); +} + +static void fdt_packblocks_(const char *old, char *new, + int mem_rsv_size, int struct_size) +{ + int mem_rsv_off, struct_off, strings_off; + + mem_rsv_off = FDT_ALIGN(sizeof(struct fdt_header), 8); + struct_off = mem_rsv_off + mem_rsv_size; + strings_off = struct_off + struct_size; + + memmove(new + mem_rsv_off, old + fdt_off_mem_rsvmap(old), mem_rsv_size); + fdt_set_off_mem_rsvmap(new, mem_rsv_off); + + memmove(new + struct_off, old + fdt_off_dt_struct(old), struct_size); + fdt_set_off_dt_struct(new, struct_off); + fdt_set_size_dt_struct(new, struct_size); + + memmove(new + strings_off, old + fdt_off_dt_strings(old), + fdt_size_dt_strings(old)); + fdt_set_off_dt_strings(new, strings_off); + fdt_set_size_dt_strings(new, fdt_size_dt_strings(old)); +} + +int fdt_open_into(const void *fdt, void *buf, int bufsize) +{ + int err; + int mem_rsv_size, struct_size; + int newsize; + const char *fdtstart = fdt; + const char *fdtend = fdtstart + fdt_totalsize(fdt); + char *tmp; + + FDT_RO_PROBE(fdt); + + mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) + * sizeof(struct fdt_reserve_entry); + + if (fdt_version(fdt) >= 17) { + struct_size = fdt_size_dt_struct(fdt); + } else { + struct_size = 0; + while (fdt_next_tag(fdt, struct_size, &struct_size) != FDT_END) + ; + if (struct_size < 0) + return struct_size; + } + + if (!fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) { + /* no further work necessary */ + err = fdt_move(fdt, buf, bufsize); + if (err) + return err; + fdt_set_version(buf, 17); + fdt_set_size_dt_struct(buf, struct_size); + fdt_set_totalsize(buf, bufsize); + return 0; + } + + /* Need to reorder */ + newsize = FDT_ALIGN(sizeof(struct fdt_header), 8) + mem_rsv_size + + struct_size + fdt_size_dt_strings(fdt); + + if (bufsize < newsize) + return -FDT_ERR_NOSPACE; + + /* First attempt to build converted tree at beginning of buffer */ + tmp = buf; + /* But if that overlaps with the old tree... 
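+	 * (an overlapping destination is allowed, including buf == fdt,
+	 * which is the usual way to grow a tree in place)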
*/ + if (((tmp + newsize) > fdtstart) && (tmp < fdtend)) { + /* Try right after the old tree instead */ + tmp = (char *)(uintptr_t)fdtend; + if ((tmp + newsize) > ((char *)buf + bufsize)) + return -FDT_ERR_NOSPACE; + } + + fdt_packblocks_(fdt, tmp, mem_rsv_size, struct_size); + memmove(buf, tmp, newsize); + + fdt_set_magic(buf, FDT_MAGIC); + fdt_set_totalsize(buf, bufsize); + fdt_set_version(buf, 17); + fdt_set_last_comp_version(buf, 16); + fdt_set_boot_cpuid_phys(buf, fdt_boot_cpuid_phys(fdt)); + + return 0; +} + +int fdt_pack(void *fdt) +{ + int mem_rsv_size; + + FDT_RW_PROBE(fdt); + + mem_rsv_size = (fdt_num_mem_rsv(fdt)+1) + * sizeof(struct fdt_reserve_entry); + fdt_packblocks_(fdt, fdt, mem_rsv_size, fdt_size_dt_struct(fdt)); + fdt_set_totalsize(fdt, fdt_data_size_(fdt)); + + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_strerror.c b/vendor/riscv-isa-sim/fdt/fdt_strerror.c new file mode 100644 index 00000000..768db66e --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_strerror.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +struct fdt_errtabent { + const char *str; +}; + +#define FDT_ERRTABENT(val) \ + [(val)] = { .str = #val, } + +static struct fdt_errtabent fdt_errtable[] = { + FDT_ERRTABENT(FDT_ERR_NOTFOUND), + FDT_ERRTABENT(FDT_ERR_EXISTS), + FDT_ERRTABENT(FDT_ERR_NOSPACE), + + FDT_ERRTABENT(FDT_ERR_BADOFFSET), + FDT_ERRTABENT(FDT_ERR_BADPATH), + FDT_ERRTABENT(FDT_ERR_BADPHANDLE), + FDT_ERRTABENT(FDT_ERR_BADSTATE), + + FDT_ERRTABENT(FDT_ERR_TRUNCATED), + FDT_ERRTABENT(FDT_ERR_BADMAGIC), + FDT_ERRTABENT(FDT_ERR_BADVERSION), + FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE), + FDT_ERRTABENT(FDT_ERR_BADLAYOUT), + FDT_ERRTABENT(FDT_ERR_INTERNAL), + FDT_ERRTABENT(FDT_ERR_BADNCELLS), + FDT_ERRTABENT(FDT_ERR_BADVALUE), + FDT_ERRTABENT(FDT_ERR_BADOVERLAY), + FDT_ERRTABENT(FDT_ERR_NOPHANDLES), + FDT_ERRTABENT(FDT_ERR_BADFLAGS), +}; +#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0])) + +const char *fdt_strerror(int errval) +{ + if (errval > 0) + return ""; + else if (errval == 0) + return ""; + else if (errval > -FDT_ERRTABSIZE) { + const char *s = fdt_errtable[-errval].str; + + if (s) + return s; + } + + return ""; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_sw.c b/vendor/riscv-isa-sim/fdt/fdt_sw.c new file mode 100644 index 00000000..76bea22f --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_sw.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
+ */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +static int fdt_sw_probe_(void *fdt) +{ + if (fdt_magic(fdt) == FDT_MAGIC) + return -FDT_ERR_BADSTATE; + else if (fdt_magic(fdt) != FDT_SW_MAGIC) + return -FDT_ERR_BADMAGIC; + return 0; +} + +#define FDT_SW_PROBE(fdt) \ + { \ + int err; \ + if ((err = fdt_sw_probe_(fdt)) != 0) \ + return err; \ + } + +/* 'memrsv' state: Initial state after fdt_create() + * + * Allowed functions: + * fdt_add_reservmap_entry() + * fdt_finish_reservemap() [moves to 'struct' state] + */ +static int fdt_sw_probe_memrsv_(void *fdt) +{ + int err = fdt_sw_probe_(fdt); + if (err) + return err; + + if (fdt_off_dt_strings(fdt) != 0) + return -FDT_ERR_BADSTATE; + return 0; +} + +#define FDT_SW_PROBE_MEMRSV(fdt) \ + { \ + int err; \ + if ((err = fdt_sw_probe_memrsv_(fdt)) != 0) \ + return err; \ + } + +/* 'struct' state: Enter this state after fdt_finish_reservemap() + * + * Allowed functions: + * fdt_begin_node() + * fdt_end_node() + * fdt_property*() + * fdt_finish() [moves to 'complete' state] + */ +static int fdt_sw_probe_struct_(void *fdt) +{ + int err = fdt_sw_probe_(fdt); + if (err) + return err; + + if (fdt_off_dt_strings(fdt) != fdt_totalsize(fdt)) + return -FDT_ERR_BADSTATE; + return 0; +} + +#define FDT_SW_PROBE_STRUCT(fdt) \ + { \ + int err; \ + if ((err = fdt_sw_probe_struct_(fdt)) != 0) \ + return err; \ + } + +static inline uint32_t sw_flags(void *fdt) +{ + /* assert: (fdt_magic(fdt) == FDT_SW_MAGIC) */ + return fdt_last_comp_version(fdt); +} + +/* 'complete' state: Enter this state after fdt_finish() + * + * Allowed functions: none + */ + +static void *fdt_grab_space_(void *fdt, size_t len) +{ + int offset = fdt_size_dt_struct(fdt); + int spaceleft; + + spaceleft = fdt_totalsize(fdt) - fdt_off_dt_struct(fdt) + - fdt_size_dt_strings(fdt); + + if ((offset + len < offset) || (offset + len > spaceleft)) + return NULL; + + fdt_set_size_dt_struct(fdt, offset + len); + return fdt_offset_ptr_w_(fdt, offset); +} + +int fdt_create_with_flags(void *buf, int bufsize, uint32_t flags) +{ + const size_t hdrsize = FDT_ALIGN(sizeof(struct fdt_header), + sizeof(struct fdt_reserve_entry)); + void *fdt = buf; + + if (bufsize < hdrsize) + return -FDT_ERR_NOSPACE; + + if (flags & ~FDT_CREATE_FLAGS_ALL) + return -FDT_ERR_BADFLAGS; + + memset(buf, 0, bufsize); + + /* + * magic and last_comp_version keep intermediate state during the fdt + * creation process, which is replaced with the proper FDT format by + * fdt_finish(). + * + * flags should be accessed with sw_flags(). 
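+	 *
+	 * A minimal sequential-write session looks roughly like this
+	 * (illustrative, error checking omitted):
+	 *
+	 *   fdt_create(buf, bufsize);
+	 *   fdt_finish_reservemap(buf);
+	 *   fdt_begin_node(buf, "");
+	 *   fdt_property(buf, "compatible", "acme,board", sizeof("acme,board"));
+	 *   fdt_end_node(buf);
+	 *   fdt_finish(buf);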
+ */ + fdt_set_magic(fdt, FDT_SW_MAGIC); + fdt_set_version(fdt, FDT_LAST_SUPPORTED_VERSION); + fdt_set_last_comp_version(fdt, flags); + + fdt_set_totalsize(fdt, bufsize); + + fdt_set_off_mem_rsvmap(fdt, hdrsize); + fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt)); + fdt_set_off_dt_strings(fdt, 0); + + return 0; +} + +int fdt_create(void *buf, int bufsize) +{ + return fdt_create_with_flags(buf, bufsize, 0); +} + +int fdt_resize(void *fdt, void *buf, int bufsize) +{ + size_t headsize, tailsize; + char *oldtail, *newtail; + + FDT_SW_PROBE(fdt); + + headsize = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + tailsize = fdt_size_dt_strings(fdt); + + if ((headsize + tailsize) > fdt_totalsize(fdt)) + return -FDT_ERR_INTERNAL; + + if ((headsize + tailsize) > bufsize) + return -FDT_ERR_NOSPACE; + + oldtail = (char *)fdt + fdt_totalsize(fdt) - tailsize; + newtail = (char *)buf + bufsize - tailsize; + + /* Two cases to avoid clobbering data if the old and new + * buffers partially overlap */ + if (buf <= fdt) { + memmove(buf, fdt, headsize); + memmove(newtail, oldtail, tailsize); + } else { + memmove(newtail, oldtail, tailsize); + memmove(buf, fdt, headsize); + } + + fdt_set_totalsize(buf, bufsize); + if (fdt_off_dt_strings(buf)) + fdt_set_off_dt_strings(buf, bufsize); + + return 0; +} + +int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size) +{ + struct fdt_reserve_entry *re; + int offset; + + FDT_SW_PROBE_MEMRSV(fdt); + + offset = fdt_off_dt_struct(fdt); + if ((offset + sizeof(*re)) > fdt_totalsize(fdt)) + return -FDT_ERR_NOSPACE; + + re = (struct fdt_reserve_entry *)((char *)fdt + offset); + re->address = cpu_to_fdt64(addr); + re->size = cpu_to_fdt64(size); + + fdt_set_off_dt_struct(fdt, offset + sizeof(*re)); + + return 0; +} + +int fdt_finish_reservemap(void *fdt) +{ + int err = fdt_add_reservemap_entry(fdt, 0, 0); + + if (err) + return err; + + fdt_set_off_dt_strings(fdt, fdt_totalsize(fdt)); + return 0; +} + +int fdt_begin_node(void *fdt, const char *name) +{ + struct fdt_node_header *nh; + int namelen; + + FDT_SW_PROBE_STRUCT(fdt); + + namelen = strlen(name) + 1; + nh = fdt_grab_space_(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen)); + if (! nh) + return -FDT_ERR_NOSPACE; + + nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); + memcpy(nh->name, name, namelen); + return 0; +} + +int fdt_end_node(void *fdt) +{ + fdt32_t *en; + + FDT_SW_PROBE_STRUCT(fdt); + + en = fdt_grab_space_(fdt, FDT_TAGSIZE); + if (! 
en) + return -FDT_ERR_NOSPACE; + + *en = cpu_to_fdt32(FDT_END_NODE); + return 0; +} + +static int fdt_add_string_(void *fdt, const char *s) +{ + char *strtab = (char *)fdt + fdt_totalsize(fdt); + int strtabsize = fdt_size_dt_strings(fdt); + int len = strlen(s) + 1; + int struct_top, offset; + + offset = -strtabsize - len; + struct_top = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + if (fdt_totalsize(fdt) + offset < struct_top) + return 0; /* no more room :( */ + + memcpy(strtab + offset, s, len); + fdt_set_size_dt_strings(fdt, strtabsize + len); + return offset; +} + +/* Must only be used to roll back in case of error */ +static void fdt_del_last_string_(void *fdt, const char *s) +{ + int strtabsize = fdt_size_dt_strings(fdt); + int len = strlen(s) + 1; + + fdt_set_size_dt_strings(fdt, strtabsize - len); +} + +static int fdt_find_add_string_(void *fdt, const char *s, int *allocated) +{ + char *strtab = (char *)fdt + fdt_totalsize(fdt); + int strtabsize = fdt_size_dt_strings(fdt); + const char *p; + + *allocated = 0; + + p = fdt_find_string_(strtab - strtabsize, strtabsize, s); + if (p) + return p - strtab; + + *allocated = 1; + + return fdt_add_string_(fdt, s); +} + +int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp) +{ + struct fdt_property *prop; + int nameoff; + int allocated; + + FDT_SW_PROBE_STRUCT(fdt); + + /* String de-duplication can be slow, _NO_NAME_DEDUP skips it */ + if (sw_flags(fdt) & FDT_CREATE_FLAG_NO_NAME_DEDUP) { + allocated = 1; + nameoff = fdt_add_string_(fdt, name); + } else { + nameoff = fdt_find_add_string_(fdt, name, &allocated); + } + if (nameoff == 0) + return -FDT_ERR_NOSPACE; + + prop = fdt_grab_space_(fdt, sizeof(*prop) + FDT_TAGALIGN(len)); + if (! prop) { + if (allocated) + fdt_del_last_string_(fdt, name); + return -FDT_ERR_NOSPACE; + } + + prop->tag = cpu_to_fdt32(FDT_PROP); + prop->nameoff = cpu_to_fdt32(nameoff); + prop->len = cpu_to_fdt32(len); + *valp = prop->data; + return 0; +} + +int fdt_property(void *fdt, const char *name, const void *val, int len) +{ + void *ptr; + int ret; + + ret = fdt_property_placeholder(fdt, name, len, &ptr); + if (ret) + return ret; + memcpy(ptr, val, len); + return 0; +} + +int fdt_finish(void *fdt) +{ + char *p = (char *)fdt; + fdt32_t *end; + int oldstroffset, newstroffset; + uint32_t tag; + int offset, nextoffset; + + FDT_SW_PROBE_STRUCT(fdt); + + /* Add terminator */ + end = fdt_grab_space_(fdt, sizeof(*end)); + if (! end) + return -FDT_ERR_NOSPACE; + *end = cpu_to_fdt32(FDT_END); + + /* Relocate the string table */ + oldstroffset = fdt_totalsize(fdt) - fdt_size_dt_strings(fdt); + newstroffset = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt); + memmove(p + newstroffset, p + oldstroffset, fdt_size_dt_strings(fdt)); + fdt_set_off_dt_strings(fdt, newstroffset); + + /* Walk the structure, correcting string offsets */ + offset = 0; + while ((tag = fdt_next_tag(fdt, offset, &nextoffset)) != FDT_END) { + if (tag == FDT_PROP) { + struct fdt_property *prop = + fdt_offset_ptr_w_(fdt, offset); + int nameoff; + + nameoff = fdt32_to_cpu(prop->nameoff); + nameoff += fdt_size_dt_strings(fdt); + prop->nameoff = cpu_to_fdt32(nameoff); + } + offset = nextoffset; + } + if (nextoffset < 0) + return nextoffset; + + /* Finally, adjust the header */ + fdt_set_totalsize(fdt, newstroffset + fdt_size_dt_strings(fdt)); + + /* And fix up fields that were keeping intermediate state. 
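+	 * (while the tree was under construction, magic held FDT_SW_MAGIC
+	 * and last_comp_version carried the sw_flags() value)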
*/ + fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION); + fdt_set_magic(fdt, FDT_MAGIC); + + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/fdt_wip.c b/vendor/riscv-isa-sim/fdt/fdt_wip.c new file mode 100644 index 00000000..f64139e0 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/fdt_wip.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + */ +#include "libfdt_env.h" + +#include +#include + +#include "libfdt_internal.h" + +int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset, + const char *name, int namelen, + uint32_t idx, const void *val, + int len) +{ + void *propval; + int proplen; + + propval = fdt_getprop_namelen_w(fdt, nodeoffset, name, namelen, + &proplen); + if (!propval) + return proplen; + + if (proplen < (len + idx)) + return -FDT_ERR_NOSPACE; + + memcpy((char *)propval + idx, val, len); + return 0; +} + +int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, + const void *val, int len) +{ + const void *propval; + int proplen; + + propval = fdt_getprop(fdt, nodeoffset, name, &proplen); + if (!propval) + return proplen; + + if (proplen != len) + return -FDT_ERR_NOSPACE; + + return fdt_setprop_inplace_namelen_partial(fdt, nodeoffset, name, + strlen(name), 0, + val, len); +} + +static void fdt_nop_region_(void *start, int len) +{ + fdt32_t *p; + + for (p = start; (char *)p < ((char *)start + len); p++) + *p = cpu_to_fdt32(FDT_NOP); +} + +int fdt_nop_property(void *fdt, int nodeoffset, const char *name) +{ + struct fdt_property *prop; + int len; + + prop = fdt_get_property_w(fdt, nodeoffset, name, &len); + if (!prop) + return len; + + fdt_nop_region_(prop, len + sizeof(*prop)); + + return 0; +} + +int fdt_node_end_offset_(void *fdt, int offset) +{ + int depth = 0; + + while ((offset >= 0) && (depth >= 0)) + offset = fdt_next_node(fdt, offset, &depth); + + return offset; +} + +int fdt_nop_node(void *fdt, int nodeoffset) +{ + int endoffset; + + endoffset = fdt_node_end_offset_(fdt, nodeoffset); + if (endoffset < 0) + return endoffset; + + fdt_nop_region_(fdt_offset_ptr_w(fdt, nodeoffset, 0), + endoffset - nodeoffset); + return 0; +} diff --git a/vendor/riscv-isa-sim/fdt/libfdt.h b/vendor/riscv-isa-sim/fdt/libfdt.h new file mode 100644 index 00000000..d2356cce --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/libfdt.h @@ -0,0 +1,2077 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +#ifndef LIBFDT_H +#define LIBFDT_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + */ + +#include +#include + +#define FDT_FIRST_SUPPORTED_VERSION 0x02 +#define FDT_LAST_SUPPORTED_VERSION 0x11 + +/* Error codes: informative error codes */ +#define FDT_ERR_NOTFOUND 1 + /* FDT_ERR_NOTFOUND: The requested node or property does not exist */ +#define FDT_ERR_EXISTS 2 + /* FDT_ERR_EXISTS: Attempted to create a node or property which + * already exists */ +#define FDT_ERR_NOSPACE 3 + /* FDT_ERR_NOSPACE: Operation needed to expand the device + * tree, but its buffer did not have sufficient space to + * contain the expanded tree. Use fdt_open_into() to move the + * device tree to a buffer with more space. */ + +/* Error codes: codes for bad parameters */ +#define FDT_ERR_BADOFFSET 4 + /* FDT_ERR_BADOFFSET: Function was passed a structure block + * offset which is out-of-bounds, or which points to an + * unsuitable part of the structure for the operation. 
*/ +#define FDT_ERR_BADPATH 5 + /* FDT_ERR_BADPATH: Function was passed a badly formatted path + * (e.g. missing a leading / for a function which requires an + * absolute path) */ +#define FDT_ERR_BADPHANDLE 6 + /* FDT_ERR_BADPHANDLE: Function was passed an invalid phandle. + * This can be caused either by an invalid phandle property + * length, or the phandle value was either 0 or -1, which are + * not permitted. */ +#define FDT_ERR_BADSTATE 7 + /* FDT_ERR_BADSTATE: Function was passed an incomplete device + * tree created by the sequential-write functions, which is + * not sufficiently complete for the requested operation. */ + +/* Error codes: codes for bad device tree blobs */ +#define FDT_ERR_TRUNCATED 8 + /* FDT_ERR_TRUNCATED: FDT or a sub-block is improperly + * terminated (overflows, goes outside allowed bounds, or + * isn't properly terminated). */ +#define FDT_ERR_BADMAGIC 9 + /* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a + * device tree at all - it is missing the flattened device + * tree magic number. */ +#define FDT_ERR_BADVERSION 10 + /* FDT_ERR_BADVERSION: Given device tree has a version which + * can't be handled by the requested operation. For + * read-write functions, this may mean that fdt_open_into() is + * required to convert the tree to the expected version. */ +#define FDT_ERR_BADSTRUCTURE 11 + /* FDT_ERR_BADSTRUCTURE: Given device tree has a corrupt + * structure block or other serious error (e.g. misnested + * nodes, or subnodes preceding properties). */ +#define FDT_ERR_BADLAYOUT 12 + /* FDT_ERR_BADLAYOUT: For read-write functions, the given + * device tree has it's sub-blocks in an order that the + * function can't handle (memory reserve map, then structure, + * then strings). Use fdt_open_into() to reorganize the tree + * into a form suitable for the read-write operations. */ + +/* "Can't happen" error indicating a bug in libfdt */ +#define FDT_ERR_INTERNAL 13 + /* FDT_ERR_INTERNAL: libfdt has failed an internal assertion. + * Should never be returned, if it is, it indicates a bug in + * libfdt itself. */ + +/* Errors in device tree content */ +#define FDT_ERR_BADNCELLS 14 + /* FDT_ERR_BADNCELLS: Device tree has a #address-cells, #size-cells + * or similar property with a bad format or value */ + +#define FDT_ERR_BADVALUE 15 + /* FDT_ERR_BADVALUE: Device tree has a property with an unexpected + * value. For example: a property expected to contain a string list + * is not NUL-terminated within the length of its value. */ + +#define FDT_ERR_BADOVERLAY 16 + /* FDT_ERR_BADOVERLAY: The device tree overlay, while + * correctly structured, cannot be applied due to some + * unexpected or missing value, property or node. */ + +#define FDT_ERR_NOPHANDLES 17 + /* FDT_ERR_NOPHANDLES: The device tree doesn't have any + * phandle available anymore without causing an overflow */ + +#define FDT_ERR_BADFLAGS 18 + /* FDT_ERR_BADFLAGS: The function was passed a flags field that + * contains invalid flags or an invalid combination of flags. */ + +#define FDT_ERR_MAX 18 + +/* constants */ +#define FDT_MAX_PHANDLE 0xfffffffe + /* Valid values for phandles range from 1 to 2^32-2. 
*/ + +#ifdef __cplusplus +extern "C" { +#endif +/**********************************************************************/ +/* Low-level functions (you probably don't need these) */ +/**********************************************************************/ + +#ifndef SWIG /* This function is not useful in Python */ +const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int checklen); +#endif +static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen) +{ + return (void *)(uintptr_t)fdt_offset_ptr(fdt, offset, checklen); +} + +uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset); + +/* + * Alignment helpers: + * These helpers access words from a device tree blob. They're + * built to work even with unaligned pointers on platforms (ike + * ARM) that don't like unaligned loads and stores + */ + +static inline uint32_t fdt32_ld(const fdt32_t *p) +{ + const uint8_t *bp = (const uint8_t *)p; + + return ((uint32_t)bp[0] << 24) + | ((uint32_t)bp[1] << 16) + | ((uint32_t)bp[2] << 8) + | bp[3]; +} + +static inline void fdt32_st(void *property, uint32_t value) +{ + uint8_t *bp = (uint8_t *)property; + + bp[0] = value >> 24; + bp[1] = (value >> 16) & 0xff; + bp[2] = (value >> 8) & 0xff; + bp[3] = value & 0xff; +} + +static inline uint64_t fdt64_ld(const fdt64_t *p) +{ + const uint8_t *bp = (const uint8_t *)p; + + return ((uint64_t)bp[0] << 56) + | ((uint64_t)bp[1] << 48) + | ((uint64_t)bp[2] << 40) + | ((uint64_t)bp[3] << 32) + | ((uint64_t)bp[4] << 24) + | ((uint64_t)bp[5] << 16) + | ((uint64_t)bp[6] << 8) + | bp[7]; +} + +static inline void fdt64_st(void *property, uint64_t value) +{ + uint8_t *bp = (uint8_t *)property; + + bp[0] = value >> 56; + bp[1] = (value >> 48) & 0xff; + bp[2] = (value >> 40) & 0xff; + bp[3] = (value >> 32) & 0xff; + bp[4] = (value >> 24) & 0xff; + bp[5] = (value >> 16) & 0xff; + bp[6] = (value >> 8) & 0xff; + bp[7] = value & 0xff; +} + +/**********************************************************************/ +/* Traversal functions */ +/**********************************************************************/ + +int fdt_next_node(const void *fdt, int offset, int *depth); + +/** + * fdt_first_subnode() - get offset of first direct subnode + * + * @fdt: FDT blob + * @offset: Offset of node to check + * @return offset of first subnode, or -FDT_ERR_NOTFOUND if there is none + */ +int fdt_first_subnode(const void *fdt, int offset); + +/** + * fdt_next_subnode() - get offset of next direct subnode + * + * After first calling fdt_first_subnode(), call this function repeatedly to + * get direct subnodes of a parent node. + * + * @fdt: FDT blob + * @offset: Offset of previous subnode + * @return offset of next subnode, or -FDT_ERR_NOTFOUND if there are no more + * subnodes + */ +int fdt_next_subnode(const void *fdt, int offset); + +/** + * fdt_for_each_subnode - iterate over all subnodes of a parent + * + * @node: child node (int, lvalue) + * @fdt: FDT blob (const void *) + * @parent: parent node (int) + * + * This is actually a wrapper around a for loop and would be used like so: + * + * fdt_for_each_subnode(node, fdt, parent) { + * Use node + * ... + * } + * + * if ((node < 0) && (node != -FDT_ERR_NOTFOUND)) { + * Error handling + * } + * + * Note that this is implemented as a macro and @node is used as + * iterator in the loop. The parent variable be constant or even a + * literal. 
+ * + */ +#define fdt_for_each_subnode(node, fdt, parent) \ + for (node = fdt_first_subnode(fdt, parent); \ + node >= 0; \ + node = fdt_next_subnode(fdt, node)) + +/**********************************************************************/ +/* General functions */ +/**********************************************************************/ +#define fdt_get_header(fdt, field) \ + (fdt32_ld(&((const struct fdt_header *)(fdt))->field)) +#define fdt_magic(fdt) (fdt_get_header(fdt, magic)) +#define fdt_totalsize(fdt) (fdt_get_header(fdt, totalsize)) +#define fdt_off_dt_struct(fdt) (fdt_get_header(fdt, off_dt_struct)) +#define fdt_off_dt_strings(fdt) (fdt_get_header(fdt, off_dt_strings)) +#define fdt_off_mem_rsvmap(fdt) (fdt_get_header(fdt, off_mem_rsvmap)) +#define fdt_version(fdt) (fdt_get_header(fdt, version)) +#define fdt_last_comp_version(fdt) (fdt_get_header(fdt, last_comp_version)) +#define fdt_boot_cpuid_phys(fdt) (fdt_get_header(fdt, boot_cpuid_phys)) +#define fdt_size_dt_strings(fdt) (fdt_get_header(fdt, size_dt_strings)) +#define fdt_size_dt_struct(fdt) (fdt_get_header(fdt, size_dt_struct)) + +#define fdt_set_hdr_(name) \ + static inline void fdt_set_##name(void *fdt, uint32_t val) \ + { \ + struct fdt_header *fdth = (struct fdt_header *)fdt; \ + fdth->name = cpu_to_fdt32(val); \ + } +fdt_set_hdr_(magic); +fdt_set_hdr_(totalsize); +fdt_set_hdr_(off_dt_struct); +fdt_set_hdr_(off_dt_strings); +fdt_set_hdr_(off_mem_rsvmap); +fdt_set_hdr_(version); +fdt_set_hdr_(last_comp_version); +fdt_set_hdr_(boot_cpuid_phys); +fdt_set_hdr_(size_dt_strings); +fdt_set_hdr_(size_dt_struct); +#undef fdt_set_hdr_ + +/** + * fdt_header_size - return the size of the tree's header + * @fdt: pointer to a flattened device tree + */ +size_t fdt_header_size_(uint32_t version); +static inline size_t fdt_header_size(const void *fdt) +{ + return fdt_header_size_(fdt_version(fdt)); +} + +/** + * fdt_check_header - sanity check a device tree header + + * @fdt: pointer to data which might be a flattened device tree + * + * fdt_check_header() checks that the given buffer contains what + * appears to be a flattened device tree, and that the header contains + * valid information (to the extent that can be determined from the + * header alone). + * + * returns: + * 0, if the buffer appears to contain a valid device tree + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_TRUNCATED, standard meanings, as above + */ +int fdt_check_header(const void *fdt); + +/** + * fdt_move - move a device tree around in memory + * @fdt: pointer to the device tree to move + * @buf: pointer to memory where the device is to be moved + * @bufsize: size of the memory space at buf + * + * fdt_move() relocates, if possible, the device tree blob located at + * fdt to the buffer at buf of size bufsize. The buffer may overlap + * with the existing device tree blob at fdt. Therefore, + * fdt_move(fdt, fdt, fdt_totalsize(fdt)) + * should always succeed. 
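+ *
+ * Editorial example (illustrative sketch, not upstream libfdt text; the
+ * destination buffer and its size are assumptions made for the example):
+ *
+ *	char bigger_buf[16384];
+ *	int err = fdt_move(fdt, bigger_buf, sizeof(bigger_buf));
+ *	if (err < 0)
+ *		return err;
+ *
+ * A negative return value is one of the error codes listed below.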
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, bufsize is insufficient to contain the device tree + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +int fdt_move(const void *fdt, void *buf, int bufsize); + +/**********************************************************************/ +/* Read-only functions */ +/**********************************************************************/ + +int fdt_check_full(const void *fdt, size_t bufsize); + +/** + * fdt_get_string - retrieve a string from the strings block of a device tree + * @fdt: pointer to the device tree blob + * @stroffset: offset of the string within the strings block (native endian) + * @lenp: optional pointer to return the string's length + * + * fdt_get_string() retrieves a pointer to a single string from the + * strings block of the device tree blob at fdt, and optionally also + * returns the string's length in *lenp. + * + * returns: + * a pointer to the string, on success + * NULL, if stroffset is out of bounds, or doesn't point to a valid string + */ +const char *fdt_get_string(const void *fdt, int stroffset, int *lenp); + +/** + * fdt_string - retrieve a string from the strings block of a device tree + * @fdt: pointer to the device tree blob + * @stroffset: offset of the string within the strings block (native endian) + * + * fdt_string() retrieves a pointer to a single string from the + * strings block of the device tree blob at fdt. + * + * returns: + * a pointer to the string, on success + * NULL, if stroffset is out of bounds, or doesn't point to a valid string + */ +const char *fdt_string(const void *fdt, int stroffset); + +/** + * fdt_find_max_phandle - find and return the highest phandle in a tree + * @fdt: pointer to the device tree blob + * @phandle: return location for the highest phandle value found in the tree + * + * fdt_find_max_phandle() finds the highest phandle value in the given device + * tree. The value returned in @phandle is only valid if the function returns + * success. + * + * returns: + * 0 on success or a negative error code on failure + */ +int fdt_find_max_phandle(const void *fdt, uint32_t *phandle); + +/** + * fdt_get_max_phandle - retrieves the highest phandle in a tree + * @fdt: pointer to the device tree blob + * + * fdt_get_max_phandle retrieves the highest phandle in the given + * device tree. This will ignore badly formatted phandles, or phandles + * with a value of 0 or -1. + * + * This function is deprecated in favour of fdt_find_max_phandle(). + * + * returns: + * the highest phandle on success + * 0, if no phandle was found in the device tree + * -1, if an error occurred + */ +static inline uint32_t fdt_get_max_phandle(const void *fdt) +{ + uint32_t phandle; + int err; + + err = fdt_find_max_phandle(fdt, &phandle); + if (err < 0) + return (uint32_t)-1; + + return phandle; +} + +/** + * fdt_generate_phandle - return a new, unused phandle for a device tree blob + * @fdt: pointer to the device tree blob + * @phandle: return location for the new phandle + * + * Walks the device tree blob and looks for the highest phandle value. On + * success, the new, unused phandle value (one higher than the previously + * highest phandle value in the device tree blob) will be returned in the + * @phandle parameter. 
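+ *
+ * Editorial example (illustrative sketch, not upstream text; nodeoffset is
+ * an assumed, already-located node):
+ *
+ *	uint32_t phandle;
+ *	if (fdt_generate_phandle(fdt, &phandle) == 0)
+ *		fdt_setprop_u32(fdt, nodeoffset, "phandle", phandle);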
+ * + * Returns: + * 0 on success or a negative error-code on failure + */ +int fdt_generate_phandle(const void *fdt, uint32_t *phandle); + +/** + * fdt_num_mem_rsv - retrieve the number of memory reserve map entries + * @fdt: pointer to the device tree blob + * + * Returns the number of entries in the device tree blob's memory + * reservation map. This does not include the terminating 0,0 entry + * or any other (0,0) entries reserved for expansion. + * + * returns: + * the number of entries + */ +int fdt_num_mem_rsv(const void *fdt); + +/** + * fdt_get_mem_rsv - retrieve one memory reserve map entry + * @fdt: pointer to the device tree blob + * @address, @size: pointers to 64-bit variables + * + * On success, *address and *size will contain the address and size of + * the n-th reserve map entry from the device tree blob, in + * native-endian format. + * + * returns: + * 0, on success + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size); + +/** + * fdt_subnode_offset_namelen - find a subnode based on substring + * @fdt: pointer to the device tree blob + * @parentoffset: structure block offset of a node + * @name: name of the subnode to locate + * @namelen: number of characters of name to consider + * + * Identical to fdt_subnode_offset(), but only examine the first + * namelen characters of name for matching the subnode name. This is + * useful for finding subnodes based on a portion of a larger string, + * such as a full path. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_subnode_offset_namelen(const void *fdt, int parentoffset, + const char *name, int namelen); +#endif +/** + * fdt_subnode_offset - find a subnode of a given node + * @fdt: pointer to the device tree blob + * @parentoffset: structure block offset of a node + * @name: name of the subnode to locate + * + * fdt_subnode_offset() finds a subnode of the node at structure block + * offset parentoffset with the given name. name may include a unit + * address, in which case fdt_subnode_offset() will find the subnode + * with that unit address, or the unit address may be omitted, in + * which case fdt_subnode_offset() will find an arbitrary subnode + * whose name excluding unit address matches the given name. + * + * returns: + * structure block offset of the requested subnode (>=0), on success + * -FDT_ERR_NOTFOUND, if the requested subnode does not exist + * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name); + +/** + * fdt_path_offset_namelen - find a tree node by its full path + * @fdt: pointer to the device tree blob + * @path: full path of the node to locate + * @namelen: number of characters of path to consider + * + * Identical to fdt_path_offset(), but only consider the first namelen + * characters of path as the path name. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen); +#endif + +/** + * fdt_path_offset - find a tree node by its full path + * @fdt: pointer to the device tree blob + * @path: full path of the node to locate + * + * fdt_path_offset() finds a node of a given path in the device tree. 
+ * Each path component may omit the unit address portion, but the + * results of this are undefined if any such path component is + * ambiguous (that is if there are multiple nodes at the relevant + * level matching the given component, differentiated only by unit + * address). + * + * returns: + * structure block offset of the node with the requested path (>=0), on + * success + * -FDT_ERR_BADPATH, given path does not begin with '/' or is invalid + * -FDT_ERR_NOTFOUND, if the requested node does not exist + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_path_offset(const void *fdt, const char *path); + +/** + * fdt_get_name - retrieve the name of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of the starting node + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_get_name() retrieves the name (including unit address) of the + * device tree node at structure block offset nodeoffset. If lenp is + * non-NULL, the length of this name is also returned, in the integer + * pointed to by lenp. + * + * returns: + * pointer to the node's name, on success + * If lenp is non-NULL, *lenp contains the length of that name + * (>=0) + * NULL, on error + * if lenp is non-NULL *lenp contains an error code (<0): + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +const char *fdt_get_name(const void *fdt, int nodeoffset, int *lenp); + +/** + * fdt_first_property_offset - find the offset of a node's first property + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of a node + * + * fdt_first_property_offset() finds the first property of the node at + * the given structure block offset. + * + * returns: + * structure block offset of the property (>=0), on success + * -FDT_ERR_NOTFOUND, if the requested node has no properties + * -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_first_property_offset(const void *fdt, int nodeoffset); + +/** + * fdt_next_property_offset - step through a node's properties + * @fdt: pointer to the device tree blob + * @offset: structure block offset of a property + * + * fdt_next_property_offset() finds the property immediately after the + * one at the given structure block offset. This will be a property + * of the same node as the given property. + * + * returns: + * structure block offset of the next property (>=0), on success + * -FDT_ERR_NOTFOUND, if the given property is the last in its node + * -FDT_ERR_BADOFFSET, if nodeoffset did not point to an FDT_PROP tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_next_property_offset(const void *fdt, int offset); + +/** + * fdt_for_each_property_offset - iterate over all properties of a node + * + * @property_offset: property offset (int, lvalue) + * @fdt: FDT blob (const void *) + * @node: node offset (int) + * + * This is actually a wrapper around a for loop and would be used like so: + * + * fdt_for_each_property_offset(property, fdt, node) { + * Use property + * ... 
+ * } + * + * if ((property < 0) && (property != -FDT_ERR_NOTFOUND)) { + * Error handling + * } + * + * Note that this is implemented as a macro and property is used as + * iterator in the loop. The node variable can be constant or even a + * literal. + */ +#define fdt_for_each_property_offset(property, fdt, node) \ + for (property = fdt_first_property_offset(fdt, node); \ + property >= 0; \ + property = fdt_next_property_offset(fdt, property)) + +/** + * fdt_get_property_by_offset - retrieve the property at a given offset + * @fdt: pointer to the device tree blob + * @offset: offset of the property to retrieve + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_get_property_by_offset() retrieves a pointer to the + * fdt_property structure within the device tree blob at the given + * offset. If lenp is non-NULL, the length of the property value is + * also returned, in the integer pointed to by lenp. + * + * Note that this code only works on device tree versions >= 16. fdt_getprop() + * works on all versions. + * + * returns: + * pointer to the structure representing the property + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +const struct fdt_property *fdt_get_property_by_offset(const void *fdt, + int offset, + int *lenp); + +/** + * fdt_get_property_namelen - find a property based on substring + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @namelen: number of characters of name to consider + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * Identical to fdt_get_property(), but only examine the first namelen + * characters of name for matching the property name. + */ +#ifndef SWIG /* Not available in Python */ +const struct fdt_property *fdt_get_property_namelen(const void *fdt, + int nodeoffset, + const char *name, + int namelen, int *lenp); +#endif + +/** + * fdt_get_property - find a given property in a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_get_property() retrieves a pointer to the fdt_property + * structure within the device tree blob corresponding to the property + * named 'name' of the node at offset nodeoffset. If lenp is + * non-NULL, the length of the property value is also returned, in the + * integer pointed to by lenp. 
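+ *
+ * Editorial example (illustrative sketch, not upstream text; the node offset
+ * and the "reg" property are assumptions):
+ *
+ *	int len;
+ *	const struct fdt_property *prop =
+ *		fdt_get_property(fdt, nodeoffset, "reg", &len);
+ *	if (!prop)
+ *		return len;
+ *
+ * On failure, len carries the negative error code, as listed below.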
+ * + * returns: + * pointer to the structure representing the property + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_NOTFOUND, node does not have named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +const struct fdt_property *fdt_get_property(const void *fdt, int nodeoffset, + const char *name, int *lenp); +static inline struct fdt_property *fdt_get_property_w(void *fdt, int nodeoffset, + const char *name, + int *lenp) +{ + return (struct fdt_property *)(uintptr_t) + fdt_get_property(fdt, nodeoffset, name, lenp); +} + +/** + * fdt_getprop_by_offset - retrieve the value of a property at a given offset + * @fdt: pointer to the device tree blob + * @offset: offset of the property to read + * @namep: pointer to a string variable (will be overwritten) or NULL + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_getprop_by_offset() retrieves a pointer to the value of the + * property at structure block offset 'offset' (this will be a pointer + * to within the device blob itself, not a copy of the value). If + * lenp is non-NULL, the length of the property value is also + * returned, in the integer pointed to by lenp. If namep is non-NULL, + * the property's namne will also be returned in the char * pointed to + * by namep (this will be a pointer to within the device tree's string + * block, not a new copy of the name). + * + * returns: + * pointer to the property's value + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * if namep is non-NULL *namep contiains a pointer to the property + * name. + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_PROP tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#ifndef SWIG /* This function is not useful in Python */ +const void *fdt_getprop_by_offset(const void *fdt, int offset, + const char **namep, int *lenp); +#endif + +/** + * fdt_getprop_namelen - get property value based on substring + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @namelen: number of characters of name to consider + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * Identical to fdt_getprop(), but only examine the first namelen + * characters of name for matching the property name. 
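+ *
+ * Editorial example (illustrative sketch, not upstream text): matching the
+ * property "status" when only an un-terminated prefix of a larger string is
+ * available (nodeoffset is an assumed node):
+ *
+ *	int len;
+ *	const char *s = "status,okay";
+ *	const void *val = fdt_getprop_namelen(fdt, nodeoffset, s, 6, &len);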
+ */ +#ifndef SWIG /* Not available in Python */ +const void *fdt_getprop_namelen(const void *fdt, int nodeoffset, + const char *name, int namelen, int *lenp); +static inline void *fdt_getprop_namelen_w(void *fdt, int nodeoffset, + const char *name, int namelen, + int *lenp) +{ + return (void *)(uintptr_t)fdt_getprop_namelen(fdt, nodeoffset, name, + namelen, lenp); +} +#endif + +/** + * fdt_getprop - retrieve the value of a given property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to find + * @name: name of the property to find + * @lenp: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_getprop() retrieves a pointer to the value of the property + * named 'name' of the node at offset nodeoffset (this will be a + * pointer to within the device blob itself, not a copy of the value). + * If lenp is non-NULL, the length of the property value is also + * returned, in the integer pointed to by lenp. + * + * returns: + * pointer to the property's value + * if lenp is non-NULL, *lenp contains the length of the property + * value (>=0) + * NULL, on error + * if lenp is non-NULL, *lenp contains an error code (<0): + * -FDT_ERR_NOTFOUND, node does not have named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE + * tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +const void *fdt_getprop(const void *fdt, int nodeoffset, + const char *name, int *lenp); +static inline void *fdt_getprop_w(void *fdt, int nodeoffset, + const char *name, int *lenp) +{ + return (void *)(uintptr_t)fdt_getprop(fdt, nodeoffset, name, lenp); +} + +/** + * fdt_get_phandle - retrieve the phandle of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of the node + * + * fdt_get_phandle() retrieves the phandle of the device tree node at + * structure block offset nodeoffset. + * + * returns: + * the phandle of the node at nodeoffset, on success (!= 0, != -1) + * 0, if the node has no phandle, or another error occurs + */ +uint32_t fdt_get_phandle(const void *fdt, int nodeoffset); + +/** + * fdt_get_alias_namelen - get alias based on substring + * @fdt: pointer to the device tree blob + * @name: name of the alias th look up + * @namelen: number of characters of name to consider + * + * Identical to fdt_get_alias(), but only examine the first namelen + * characters of name for matching the alias name. + */ +#ifndef SWIG /* Not available in Python */ +const char *fdt_get_alias_namelen(const void *fdt, + const char *name, int namelen); +#endif + +/** + * fdt_get_alias - retrieve the path referenced by a given alias + * @fdt: pointer to the device tree blob + * @name: name of the alias th look up + * + * fdt_get_alias() retrieves the value of a given alias. That is, the + * value of the property named 'name' in the node /aliases. 
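+ *
+ * Editorial example (illustrative sketch, not upstream text; the "serial0"
+ * alias is assumed to exist in the tree):
+ *
+ *	const char *path = fdt_get_alias(fdt, "serial0");
+ *	if (path)
+ *		nodeoffset = fdt_path_offset(fdt, path);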
+ * + * returns: + * a pointer to the expansion of the alias named 'name', if it exists + * NULL, if the given alias or the /aliases node does not exist + */ +const char *fdt_get_alias(const void *fdt, const char *name); + +/** + * fdt_get_path - determine the full path of a node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose path to find + * @buf: character buffer to contain the returned path (will be overwritten) + * @buflen: size of the character buffer at buf + * + * fdt_get_path() computes the full path of the node at offset + * nodeoffset, and records that path in the buffer at buf. + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset. + * + * returns: + * 0, on success + * buf contains the absolute path of the node at + * nodeoffset, as a NUL-terminated string. + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_NOSPACE, the path of the given node is longer than (bufsize-1) + * characters and will not fit in the given buffer. + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen); + +/** + * fdt_supernode_atdepth_offset - find a specific ancestor of a node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose parent to find + * @supernodedepth: depth of the ancestor to find + * @nodedepth: pointer to an integer variable (will be overwritten) or NULL + * + * fdt_supernode_atdepth_offset() finds an ancestor of the given node + * at a specific depth from the root (where the root itself has depth + * 0, its immediate subnodes depth 1 and so forth). So + * fdt_supernode_atdepth_offset(fdt, nodeoffset, 0, NULL); + * will always return 0, the offset of the root node. If the node at + * nodeoffset has depth D, then: + * fdt_supernode_atdepth_offset(fdt, nodeoffset, D, NULL); + * will return nodeoffset itself. + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset. + * + * returns: + * structure block offset of the node at node offset's ancestor + * of depth supernodedepth (>=0), on success + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_NOTFOUND, supernodedepth was greater than the depth of + * nodeoffset + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset, + int supernodedepth, int *nodedepth); + +/** + * fdt_node_depth - find the depth of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose parent to find + * + * fdt_node_depth() finds the depth of a given node. The root node + * has depth 0, its immediate subnodes depth 1 and so forth. + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset. 
+ * + * returns: + * depth of the node at nodeoffset (>=0), on success + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_depth(const void *fdt, int nodeoffset); + +/** + * fdt_parent_offset - find the parent of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose parent to find + * + * fdt_parent_offset() locates the parent node of a given node (that + * is, it finds the offset of the node which contains the node at + * nodeoffset as a subnode). + * + * NOTE: This function is expensive, as it must scan the device tree + * structure from the start to nodeoffset, *twice*. + * + * returns: + * structure block offset of the parent of the node at nodeoffset + * (>=0), on success + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_parent_offset(const void *fdt, int nodeoffset); + +/** + * fdt_node_offset_by_prop_value - find nodes with a given property value + * @fdt: pointer to the device tree blob + * @startoffset: only find nodes after this offset + * @propname: property name to check + * @propval: property value to search for + * @proplen: length of the value in propval + * + * fdt_node_offset_by_prop_value() returns the offset of the first + * node after startoffset, which has a property named propname whose + * value is of length proplen and has value equal to propval; or if + * startoffset is -1, the very first such node in the tree. + * + * To iterate through all nodes matching the criterion, the following + * idiom can be used: + * offset = fdt_node_offset_by_prop_value(fdt, -1, propname, + * propval, proplen); + * while (offset != -FDT_ERR_NOTFOUND) { + * // other code here + * offset = fdt_node_offset_by_prop_value(fdt, offset, propname, + * propval, proplen); + * } + * + * Note the -1 in the first call to the function, if 0 is used here + * instead, the function will never locate the root node, even if it + * matches the criterion. + * + * returns: + * structure block offset of the located node (>= 0, >startoffset), + * on success + * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the + * tree after startoffset + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_offset_by_prop_value(const void *fdt, int startoffset, + const char *propname, + const void *propval, int proplen); + +/** + * fdt_node_offset_by_phandle - find the node with a given phandle + * @fdt: pointer to the device tree blob + * @phandle: phandle value + * + * fdt_node_offset_by_phandle() returns the offset of the node + * which has the given phandle value. If there is more than one node + * in the tree with the given phandle (an invalid tree), results are + * undefined. 
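+ *
+ * Editorial example (illustrative sketch, not upstream text; the phandle is
+ * assumed to have been read from a property such as interrupt-parent):
+ *
+ *	int offset = fdt_node_offset_by_phandle(fdt, phandle);
+ *	if (offset < 0)
+ *		return offset;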
+ * + * returns: + * structure block offset of the located node (>= 0), on success + * -FDT_ERR_NOTFOUND, no node with that phandle exists + * -FDT_ERR_BADPHANDLE, given phandle value was invalid (0 or -1) + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle); + +/** + * fdt_node_check_compatible: check a node's compatible property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @compatible: string to match against + * + * + * fdt_node_check_compatible() returns 0 if the given node contains a + * 'compatible' property with the given string as one of its elements, + * it returns non-zero otherwise, or on error. + * + * returns: + * 0, if the node has a 'compatible' property listing the given string + * 1, if the node has a 'compatible' property, but it does not list + * the given string + * -FDT_ERR_NOTFOUND, if the given node has no 'compatible' property + * -FDT_ERR_BADOFFSET, if nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_check_compatible(const void *fdt, int nodeoffset, + const char *compatible); + +/** + * fdt_node_offset_by_compatible - find nodes with a given 'compatible' value + * @fdt: pointer to the device tree blob + * @startoffset: only find nodes after this offset + * @compatible: 'compatible' string to match against + * + * fdt_node_offset_by_compatible() returns the offset of the first + * node after startoffset, which has a 'compatible' property which + * lists the given compatible string; or if startoffset is -1, the + * very first such node in the tree. + * + * To iterate through all nodes matching the criterion, the following + * idiom can be used: + * offset = fdt_node_offset_by_compatible(fdt, -1, compatible); + * while (offset != -FDT_ERR_NOTFOUND) { + * // other code here + * offset = fdt_node_offset_by_compatible(fdt, offset, compatible); + * } + * + * Note the -1 in the first call to the function, if 0 is used here + * instead, the function will never locate the root node, even if it + * matches the criterion. + * + * returns: + * structure block offset of the located node (>= 0, >startoffset), + * on success + * -FDT_ERR_NOTFOUND, no node matching the criterion exists in the + * tree after startoffset + * -FDT_ERR_BADOFFSET, nodeoffset does not refer to a BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, standard meanings + */ +int fdt_node_offset_by_compatible(const void *fdt, int startoffset, + const char *compatible); + +/** + * fdt_stringlist_contains - check a string list property for a string + * @strlist: Property containing a list of strings to check + * @listlen: Length of property + * @str: String to search for + * + * This is a utility function provided for convenience. The list contains + * one or more strings, each terminated by \0, as is found in a device tree + * "compatible" property. 
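+ *
+ * Editorial example (illustrative sketch, not upstream text; the node offset
+ * and the "riscv,clint0" compatible string are assumptions, and
+ * handle_match() is a hypothetical caller-side helper):
+ *
+ *	int len;
+ *	const char *list = fdt_getprop(fdt, nodeoffset, "compatible", &len);
+ *	if (list && len > 0 &&
+ *	    fdt_stringlist_contains(list, len, "riscv,clint0"))
+ *		handle_match(nodeoffset);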
+ * + * @return: 1 if the string is found in the list, 0 not found, or invalid list + */ +int fdt_stringlist_contains(const char *strlist, int listlen, const char *str); + +/** + * fdt_stringlist_count - count the number of strings in a string list + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @property: name of the property containing the string list + * @return: + * the number of strings in the given property + * -FDT_ERR_BADVALUE if the property value is not NUL-terminated + * -FDT_ERR_NOTFOUND if the property does not exist + */ +int fdt_stringlist_count(const void *fdt, int nodeoffset, const char *property); + +/** + * fdt_stringlist_search - find a string in a string list and return its index + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @property: name of the property containing the string list + * @string: string to look up in the string list + * + * Note that it is possible for this function to succeed on property values + * that are not NUL-terminated. That's because the function will stop after + * finding the first occurrence of @string. This can for example happen with + * small-valued cell properties, such as #address-cells, when searching for + * the empty string. + * + * @return: + * the index of the string in the list of strings + * -FDT_ERR_BADVALUE if the property value is not NUL-terminated + * -FDT_ERR_NOTFOUND if the property does not exist or does not contain + * the given string + */ +int fdt_stringlist_search(const void *fdt, int nodeoffset, const char *property, + const char *string); + +/** + * fdt_stringlist_get() - obtain the string at a given index in a string list + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of a tree node + * @property: name of the property containing the string list + * @index: index of the string to return + * @lenp: return location for the string length or an error code on failure + * + * Note that this will successfully extract strings from properties with + * non-NUL-terminated values. For example on small-valued cell properties + * this function will return the empty string. + * + * If non-NULL, the length of the string (on success) or a negative error-code + * (on failure) will be stored in the integer pointer to by lenp. + * + * @return: + * A pointer to the string at the given index in the string list or NULL on + * failure. On success the length of the string will be stored in the memory + * location pointed to by the lenp parameter, if non-NULL. On failure one of + * the following negative error codes will be returned in the lenp parameter + * (if non-NULL): + * -FDT_ERR_BADVALUE if the property value is not NUL-terminated + * -FDT_ERR_NOTFOUND if the property does not exist + */ +const char *fdt_stringlist_get(const void *fdt, int nodeoffset, + const char *property, int index, + int *lenp); + +/**********************************************************************/ +/* Read-only functions (addressing related) */ +/**********************************************************************/ + +/** + * FDT_MAX_NCELLS - maximum value for #address-cells and #size-cells + * + * This is the maximum value for #address-cells, #size-cells and + * similar properties that will be processed by libfdt. IEE1275 + * requires that OF implementations handle values up to 4. + * Implementations may support larger values, but in practice higher + * values aren't used. 
+ */ +#define FDT_MAX_NCELLS 4 + +/** + * fdt_address_cells - retrieve address size for a bus represented in the tree + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to find the address size for + * + * When the node has a valid #address-cells property, returns its value. + * + * returns: + * 0 <= n < FDT_MAX_NCELLS, on success + * 2, if the node has no #address-cells property + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid + * #address-cells property + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_address_cells(const void *fdt, int nodeoffset); + +/** + * fdt_size_cells - retrieve address range size for a bus represented in the + * tree + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to find the address range size for + * + * When the node has a valid #size-cells property, returns its value. + * + * returns: + * 0 <= n < FDT_MAX_NCELLS, on success + * 1, if the node has no #size-cells property + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid + * #size-cells property + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_size_cells(const void *fdt, int nodeoffset); + + +/**********************************************************************/ +/* Write-in-place functions */ +/**********************************************************************/ + +/** + * fdt_setprop_inplace_namelen_partial - change a property's value, + * but not its size + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @namelen: number of characters of name to consider + * @idx: index of the property to change in the array + * @val: pointer to data to replace the property value with + * @len: length of the property value + * + * Identical to fdt_setprop_inplace(), but modifies the given property + * starting from the given index, and using only the first characters + * of the name. It is useful when you want to manipulate only one value of + * an array and you have a string that doesn't end with \0. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_setprop_inplace_namelen_partial(void *fdt, int nodeoffset, + const char *name, int namelen, + uint32_t idx, const void *val, + int len); +#endif + +/** + * fdt_setprop_inplace - change a property's value, but not its size + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: pointer to data to replace the property value with + * @len: length of the property value + * + * fdt_setprop_inplace() replaces the value of a given property with + * the data in val, of length len. This function cannot change the + * size of a property, and so will only work if len is equal to the + * current length of the property. + * + * This function will alter only the bytes in the blob which contain + * the given property value, and will not alter or move any other part + * of the tree. 
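+ *
+ * Editorial example (illustrative sketch, not upstream text; the property is
+ * assumed to already exist with a 4-byte value):
+ *
+ *	fdt32_t tmp = cpu_to_fdt32(0x10000);
+ *	int err = fdt_setprop_inplace(fdt, nodeoffset, "clock-frequency",
+ *				      &tmp, sizeof(tmp));
+ *
+ * For 32-bit and 64-bit values, fdt_setprop_inplace_u32() and
+ * fdt_setprop_inplace_u64() below wrap exactly this pattern.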
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, if len is not equal to the property's current length + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#ifndef SWIG /* Not available in Python */ +int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, + const void *val, int len); +#endif + +/** + * fdt_setprop_inplace_u32 - change the value of a 32-bit integer property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 32-bit integer value to replace the property with + * + * fdt_setprop_inplace_u32() replaces the value of a given property + * with the 32-bit integer value in val, converting val to big-endian + * if necessary. This function cannot change the size of a property, + * and so will only work if the property already exists and has length + * 4. + * + * This function will alter only the bytes in the blob which contain + * the given property value, and will not alter or move any other part + * of the tree. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, if the property's length is not equal to 4 + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset, + const char *name, uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_inplace_u64 - change the value of a 64-bit integer property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 64-bit integer value to replace the property with + * + * fdt_setprop_inplace_u64() replaces the value of a given property + * with the 64-bit integer value in val, converting val to big-endian + * if necessary. This function cannot change the size of a property, + * and so will only work if the property already exists and has length + * 8. + * + * This function will alter only the bytes in the blob which contain + * the given property value, and will not alter or move any other part + * of the tree. 
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, if the property's length is not equal to 8 + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset, + const char *name, uint64_t val) +{ + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_inplace_cell - change the value of a single-cell property + * + * This is an alternative name for fdt_setprop_inplace_u32() + */ +static inline int fdt_setprop_inplace_cell(void *fdt, int nodeoffset, + const char *name, uint32_t val) +{ + return fdt_setprop_inplace_u32(fdt, nodeoffset, name, val); +} + +/** + * fdt_nop_property - replace a property with nop tags + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to nop + * @name: name of the property to nop + * + * fdt_nop_property() will replace a given property's representation + * in the blob with FDT_NOP tags, effectively removing it from the + * tree. + * + * This function will alter only the bytes in the blob which contain + * the property, and will not alter or move any other part of the + * tree. + * + * returns: + * 0, on success + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_nop_property(void *fdt, int nodeoffset, const char *name); + +/** + * fdt_nop_node - replace a node (subtree) with nop tags + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to nop + * + * fdt_nop_node() will replace a given node's representation in the + * blob, including all its subnodes, if any, with FDT_NOP tags, + * effectively removing it from the tree. + * + * This function will alter only the bytes in the blob which contain + * the node and its properties and subnodes, and will not alter or + * move any other part of the tree. + * + * returns: + * 0, on success + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_nop_node(void *fdt, int nodeoffset); + +/**********************************************************************/ +/* Sequential write functions */ +/**********************************************************************/ + +/* fdt_create_with_flags flags */ +#define FDT_CREATE_FLAG_NO_NAME_DEDUP 0x1 + /* FDT_CREATE_FLAG_NO_NAME_DEDUP: Do not try to de-duplicate property + * names in the fdt. This can result in faster creation times, but + * a larger fdt. */ + +#define FDT_CREATE_FLAGS_ALL (FDT_CREATE_FLAG_NO_NAME_DEDUP) + +/** + * fdt_create_with_flags - begin creation of a new fdt + * @fdt: pointer to memory allocated where fdt will be created + * @bufsize: size of the memory space at fdt + * @flags: a valid combination of FDT_CREATE_FLAG_ flags, or 0. + * + * fdt_create_with_flags() begins the process of creating a new fdt with + * the sequential write interface. + * + * fdt creation process must end with fdt_finished() to produce a valid fdt. 
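+ *
+ * Editorial example (illustrative end-to-end sketch of the sequential-write
+ * flow, not upstream text; the buffer size and property names are
+ * assumptions, and error checking is omitted for brevity):
+ *
+ *	static char buf[4096];
+ *	fdt_create_with_flags(buf, sizeof(buf), 0);
+ *	fdt_finish_reservemap(buf);
+ *	fdt_begin_node(buf, "");
+ *	fdt_property_u32(buf, "#address-cells", 2);
+ *	fdt_end_node(buf);
+ *	fdt_finish(buf);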
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, bufsize is insufficient for a minimal fdt + * -FDT_ERR_BADFLAGS, flags is not valid + */ +int fdt_create_with_flags(void *buf, int bufsize, uint32_t flags); + +/** + * fdt_create - begin creation of a new fdt + * @fdt: pointer to memory allocated where fdt will be created + * @bufsize: size of the memory space at fdt + * + * fdt_create() is equivalent to fdt_create_with_flags() with flags=0. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, bufsize is insufficient for a minimal fdt + */ +int fdt_create(void *buf, int bufsize); + +int fdt_resize(void *fdt, void *buf, int bufsize); +int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size); +int fdt_finish_reservemap(void *fdt); +int fdt_begin_node(void *fdt, const char *name); +int fdt_property(void *fdt, const char *name, const void *val, int len); +static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_property(fdt, name, &tmp, sizeof(tmp)); +} +static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val) +{ + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_property(fdt, name, &tmp, sizeof(tmp)); +} + +#ifndef SWIG /* Not available in Python */ +static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val) +{ + return fdt_property_u32(fdt, name, val); +} +#endif + +/** + * fdt_property_placeholder - add a new property and return a ptr to its value + * + * @fdt: pointer to the device tree blob + * @name: name of property to add + * @len: length of property value in bytes + * @valp: returns a pointer to where where the value should be placed + * + * returns: + * 0, on success + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_NOSPACE, standard meanings + */ +int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp); + +#define fdt_property_string(fdt, name, str) \ + fdt_property(fdt, name, str, strlen(str)+1) +int fdt_end_node(void *fdt); +int fdt_finish(void *fdt); + +/**********************************************************************/ +/* Read-write functions */ +/**********************************************************************/ + +int fdt_create_empty_tree(void *buf, int bufsize); +int fdt_open_into(const void *fdt, void *buf, int bufsize); +int fdt_pack(void *fdt); + +/** + * fdt_add_mem_rsv - add one memory reserve map entry + * @fdt: pointer to the device tree blob + * @address, @size: 64-bit values (native endian) + * + * Adds a reserve map entry to the given blob reserving a region at + * address address of length size. + * + * This function will insert data into the reserve map and will + * therefore change the indexes of some entries in the table. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new reservation entry + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size); + +/** + * fdt_del_mem_rsv - remove a memory reserve map entry + * @fdt: pointer to the device tree blob + * @n: entry to remove + * + * fdt_del_mem_rsv() removes the n-th memory reserve map entry from + * the blob. + * + * This function will delete data from the reservation table and will + * therefore change the indexes of some entries in the table. 
+ * + * returns: + * 0, on success + * -FDT_ERR_NOTFOUND, there is no entry of the given index (i.e. there + * are less than n+1 reserve map entries) + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_del_mem_rsv(void *fdt, int n); + +/** + * fdt_set_name - change the name of a given node + * @fdt: pointer to the device tree blob + * @nodeoffset: structure block offset of a node + * @name: name to give the node + * + * fdt_set_name() replaces the name (including unit address, if any) + * of the given node with the given string. NOTE: this function can't + * efficiently check if the new name is unique amongst the given + * node's siblings; results are undefined if this function is invoked + * with a name equal to one of the given node's siblings. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob + * to contain the new name + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, standard meanings + */ +int fdt_set_name(void *fdt, int nodeoffset, const char *name); + +/** + * fdt_setprop - create or change a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: pointer to data to set the property value to + * @len: length of the property value + * + * fdt_setprop() sets the value of the named property in the given + * node to the given value and length, creating the property if it + * does not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_setprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len); + +/** + * fdt_setprop_placeholder - allocate space for a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @len: length of the property value + * @prop_data: return pointer to property data + * + * fdt_setprop_placeholer() allocates the named property in the given node. + * If the property exists it is resized. In either case a pointer to the + * property data is returned. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. 
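+ *
+ * Editorial example (illustrative sketch, not upstream text; the property
+ * name, its 16-byte size and the reg_bytes source buffer are assumptions):
+ *
+ *	void *data;
+ *	int err = fdt_setprop_placeholder(fdt, nodeoffset, "reg", 16, &data);
+ *	if (!err)
+ *		memcpy(data, reg_bytes, 16);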
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name, + int len, void **prop_data); + +/** + * fdt_setprop_u32 - set a property to a 32-bit integer + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 32-bit integer value for the property (native endian) + * + * fdt_setprop_u32() sets the value of the named property in the given + * node to the given 32-bit integer value (converting to big-endian if + * necessary), or creates a new property with that value if it does + * not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name, + uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_u64 - set a property to a 64-bit integer + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 64-bit integer value for the property (native endian) + * + * fdt_setprop_u64() sets the value of the named property in the given + * node to the given 64-bit integer value (converting to big-endian if + * necessary), or creates a new property with that value if it does + * not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. 
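fdt_setprop_placeholder() reserves the property space first and lets the caller fill it afterwards, which avoids staging the value in a separate temporary buffer. A sketch with a hypothetical node offset and property name:

    #include <libfdt.h>
    #include <stdint.h>
    #include <string.h>

    static int write_mac(void *fdt, int node, const uint8_t mac[6])
    {
        void *p;
        int err = fdt_setprop_placeholder(fdt, node, "local-mac-address", 6, &p);
        if (err < 0)
            return err;
        memcpy(p, mac, 6);   /* fill the reserved bytes in place */
        return 0;
    }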
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name, + uint64_t val) +{ + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_setprop_cell - set a property to a single cell value + * + * This is an alternative name for fdt_setprop_u32() + */ +static inline int fdt_setprop_cell(void *fdt, int nodeoffset, const char *name, + uint32_t val) +{ + return fdt_setprop_u32(fdt, nodeoffset, name, val); +} + +/** + * fdt_setprop_string - set a property to a string value + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @str: string value for the property + * + * fdt_setprop_string() sets the value of the named property in the + * given node to the given string value (using the length of the + * string to determine the new length of the property), or creates a + * new property with that value if it does not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#define fdt_setprop_string(fdt, nodeoffset, name, str) \ + fdt_setprop((fdt), (nodeoffset), (name), (str), strlen(str)+1) + + +/** + * fdt_setprop_empty - set a property to an empty value + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * + * fdt_setprop_empty() sets the value of the named property in the + * given node to an empty (zero length) value, or creates a new empty + * property if it does not already exist. + * + * This function may insert or delete data from the blob, and will + * therefore change the offsets of some existing nodes. 
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#define fdt_setprop_empty(fdt, nodeoffset, name) \ + fdt_setprop((fdt), (nodeoffset), (name), NULL, 0) + +/** + * fdt_appendprop - append to or create a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to append to + * @val: pointer to data to append to the property value + * @len: length of the data to append to the property value + * + * fdt_appendprop() appends the value to the named property in the + * given node, creating the property if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_appendprop(void *fdt, int nodeoffset, const char *name, + const void *val, int len); + +/** + * fdt_appendprop_u32 - append a 32-bit integer value to a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 32-bit integer value to append to the property (native endian) + * + * fdt_appendprop_u32() appends the given 32-bit integer value + * (converting to big-endian if necessary) to the value of the named + * property in the given node, or creates a new property with that + * value if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_appendprop_u32(void *fdt, int nodeoffset, + const char *name, uint32_t val) +{ + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_appendprop_u64 - append a 64-bit integer value to a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @val: 64-bit integer value to append to the property (native endian) + * + * fdt_appendprop_u64() appends the given 64-bit integer value + * (converting to big-endian if necessary) to the value of the named + * property in the given node, or creates a new property with that + * value if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. 
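fdt_setprop_empty() followed by repeated fdt_appendprop_u32() calls is a common way to build a cell list incrementally. A sketch with made-up interrupt numbers and a hypothetical node offset:

    #include <libfdt.h>

    static int add_interrupts(void *fdt, int node)
    {
        int err;

        if ((err = fdt_setprop_empty(fdt, node, "interrupts")) < 0)
            return err;
        if ((err = fdt_appendprop_u32(fdt, node, "interrupts", 7)) < 0)
            return err;
        return fdt_appendprop_u32(fdt, node, "interrupts", 11);
    }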
+ * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +static inline int fdt_appendprop_u64(void *fdt, int nodeoffset, + const char *name, uint64_t val) +{ + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); +} + +/** + * fdt_appendprop_cell - append a single cell value to a property + * + * This is an alternative name for fdt_appendprop_u32() + */ +static inline int fdt_appendprop_cell(void *fdt, int nodeoffset, + const char *name, uint32_t val) +{ + return fdt_appendprop_u32(fdt, nodeoffset, name, val); +} + +/** + * fdt_appendprop_string - append a string to a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to change + * @name: name of the property to change + * @str: string value to append to the property + * + * fdt_appendprop_string() appends the given string to the value of + * the named property in the given node, or creates a new property + * with that value if it does not already exist. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain the new property value + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_TRUNCATED, standard meanings + */ +#define fdt_appendprop_string(fdt, nodeoffset, name, str) \ + fdt_appendprop((fdt), (nodeoffset), (name), (str), strlen(str)+1) + +/** + * fdt_appendprop_addrrange - append a address range property + * @fdt: pointer to the device tree blob + * @parent: offset of the parent node + * @nodeoffset: offset of the node to add a property at + * @name: name of property + * @addr: start address of a given range + * @size: size of a given range + * + * fdt_appendprop_addrrange() appends an address range value (start + * address and size) to the value of the named property in the given + * node, or creates a new property with that value if it does not + * already exist. + * If "name" is not specified, a default "reg" is used. + * Cell sizes are determined by parent's #address-cells and #size-cells. + * + * This function may insert data into the blob, and will therefore + * change the offsets of some existing nodes. 
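Appending NUL-terminated strings is how multi-entry stringlists such as "compatible" are built. A sketch using the fdt_setprop_string()/fdt_appendprop_string() macros above, with hypothetical compatible strings:

    #include <libfdt.h>

    static int set_compatible(void *fdt, int node)
    {
        int err = fdt_setprop_string(fdt, node, "compatible", "vendor,device-v2");
        if (err < 0)
            return err;
        /* Second entry is appended after the first string's terminating NUL. */
        return fdt_appendprop_string(fdt, node, "compatible", "vendor,device");
    }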
+ * + * returns: + * 0, on success + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid + * #address-cells property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADVALUE, addr or size doesn't fit to respective cells size + * -FDT_ERR_NOSPACE, there is insufficient free space in the blob to + * contain a new property + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_appendprop_addrrange(void *fdt, int parent, int nodeoffset, + const char *name, uint64_t addr, uint64_t size); + +/** + * fdt_delprop - delete a property + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node whose property to nop + * @name: name of the property to nop + * + * fdt_del_property() will delete the given property. + * + * This function will delete data from the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_NOTFOUND, node does not have the named property + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_delprop(void *fdt, int nodeoffset, const char *name); + +/** + * fdt_add_subnode_namelen - creates a new node based on substring + * @fdt: pointer to the device tree blob + * @parentoffset: structure block offset of a node + * @name: name of the subnode to locate + * @namelen: number of characters of name to consider + * + * Identical to fdt_add_subnode(), but use only the first namelen + * characters of name as the name of the new node. This is useful for + * creating subnodes based on a portion of a larger string, such as a + * full path. + */ +#ifndef SWIG /* Not available in Python */ +int fdt_add_subnode_namelen(void *fdt, int parentoffset, + const char *name, int namelen); +#endif + +/** + * fdt_add_subnode - creates a new node + * @fdt: pointer to the device tree blob + * @parentoffset: structure block offset of a node + * @name: name of the subnode to locate + * + * fdt_add_subnode() creates a new node as a subnode of the node at + * structure block offset parentoffset, with the given name (which + * should include the unit address, if any). + * + * This function will insert data into the blob, and will therefore + * change the offsets of some existing nodes. + + * returns: + * structure block offset of the created nodeequested subnode (>=0), on + * success + * -FDT_ERR_NOTFOUND, if the requested subnode does not exist + * -FDT_ERR_BADOFFSET, if parentoffset did not point to an FDT_BEGIN_NODE + * tag + * -FDT_ERR_EXISTS, if the node at parentoffset already has a subnode of + * the given name + * -FDT_ERR_NOSPACE, if there is insufficient free space in the + * blob to contain the new node + * -FDT_ERR_NOSPACE + * -FDT_ERR_BADLAYOUT + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings. + */ +int fdt_add_subnode(void *fdt, int parentoffset, const char *name); + +/** + * fdt_del_node - delete a node (subtree) + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to nop + * + * fdt_del_node() will remove the given node, including all its + * subnodes if any, from the blob. 
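A sketch of the add/delete pair documented above: create a subnode under the root (structure offset 0), give it a property, then drop the whole subtree again. The node name and value are arbitrary:

    #include <libfdt.h>

    static int add_and_remove(void *fdt)
    {
        int node = fdt_add_subnode(fdt, 0 /* root */, "scratch@0");
        if (node < 0)
            return node;
        int err = fdt_setprop_u32(fdt, node, "value", 42);
        if (err < 0)
            return err;
        return fdt_del_node(fdt, node);   /* removes the node and any subnodes */
    }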
+ * + * This function will delete data from the blob, and will therefore + * change the offsets of some existing nodes. + * + * returns: + * 0, on success + * -FDT_ERR_BADOFFSET, nodeoffset did not point to FDT_BEGIN_NODE tag + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_del_node(void *fdt, int nodeoffset); + +/** + * fdt_overlay_apply - Applies a DT overlay on a base DT + * @fdt: pointer to the base device tree blob + * @fdto: pointer to the device tree overlay blob + * + * fdt_overlay_apply() will apply the given device tree overlay on the + * given base device tree. + * + * Expect the base device tree to be modified, even if the function + * returns an error. + * + * returns: + * 0, on success + * -FDT_ERR_NOSPACE, there's not enough space in the base device tree + * -FDT_ERR_NOTFOUND, the overlay points to some inexistant nodes or + * properties in the base DT + * -FDT_ERR_BADPHANDLE, + * -FDT_ERR_BADOVERLAY, + * -FDT_ERR_NOPHANDLES, + * -FDT_ERR_INTERNAL, + * -FDT_ERR_BADLAYOUT, + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADOFFSET, + * -FDT_ERR_BADPATH, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_overlay_apply(void *fdt, void *fdto); + +/**********************************************************************/ +/* Debugging / informational functions */ +/**********************************************************************/ + +const char *fdt_strerror(int errval); +#ifdef __cplusplus +} +#endif + +#endif /* LIBFDT_H */ diff --git a/vendor/riscv-isa-sim/fdt/libfdt_env.h b/vendor/riscv-isa-sim/fdt/libfdt_env.h new file mode 100644 index 00000000..44bd12a0 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/libfdt_env.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +#ifndef LIBFDT_ENV_H +#define LIBFDT_ENV_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * Copyright 2012 Kim Phillips, Freescale Semiconductor. 
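A sketch of the overlay entry point above, using fdt_strerror() from the debugging section to turn a failure into a readable message. As the comment notes, the base tree may already have been modified by the time an error is returned:

    #include <libfdt.h>
    #include <stdio.h>

    static int apply_overlay(void *base, void *overlay)
    {
        int err = fdt_overlay_apply(base, overlay);
        if (err < 0)
            fprintf(stderr, "overlay apply failed: %s\n", fdt_strerror(err));
        return err;
    }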
+ */ + +#include +#include +#include +#include +#include + +#ifdef __CHECKER__ +#define FDT_FORCE __attribute__((force)) +#define FDT_BITWISE __attribute__((bitwise)) +#else +#define FDT_FORCE +#define FDT_BITWISE +#endif + +typedef uint16_t FDT_BITWISE fdt16_t; +typedef uint32_t FDT_BITWISE fdt32_t; +typedef uint64_t FDT_BITWISE fdt64_t; + +#define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n]) +#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1)) +#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \ + (EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3)) +#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \ + (EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \ + (EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \ + (EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7)) + +static inline uint16_t fdt16_to_cpu(fdt16_t x) +{ + return (FDT_FORCE uint16_t)CPU_TO_FDT16(x); +} +static inline fdt16_t cpu_to_fdt16(uint16_t x) +{ + return (FDT_FORCE fdt16_t)CPU_TO_FDT16(x); +} + +static inline uint32_t fdt32_to_cpu(fdt32_t x) +{ + return (FDT_FORCE uint32_t)CPU_TO_FDT32(x); +} +static inline fdt32_t cpu_to_fdt32(uint32_t x) +{ + return (FDT_FORCE fdt32_t)CPU_TO_FDT32(x); +} + +static inline uint64_t fdt64_to_cpu(fdt64_t x) +{ + return (FDT_FORCE uint64_t)CPU_TO_FDT64(x); +} +static inline fdt64_t cpu_to_fdt64(uint64_t x) +{ + return (FDT_FORCE fdt64_t)CPU_TO_FDT64(x); +} +#undef CPU_TO_FDT64 +#undef CPU_TO_FDT32 +#undef CPU_TO_FDT16 +#undef EXTRACT_BYTE + +#ifdef __APPLE__ +#include + +/* strnlen() is not available on Mac OS < 10.7 */ +# if !defined(MAC_OS_X_VERSION_10_7) || (MAC_OS_X_VERSION_MAX_ALLOWED < \ + MAC_OS_X_VERSION_10_7) + +#define strnlen fdt_strnlen + +/* + * fdt_strnlen: returns the length of a string or max_count - which ever is + * smallest. + * Input 1 string: the string whose size is to be determined + * Input 2 max_count: the maximum value returned by this function + * Output: length of the string or max_count (the smallest of the two) + */ +static inline size_t fdt_strnlen(const char *string, size_t max_count) +{ + const char *p = memchr(string, 0, max_count); + return p ? p - string : max_count; +} + +#endif /* !defined(MAC_OS_X_VERSION_10_7) || (MAC_OS_X_VERSION_MAX_ALLOWED < + MAC_OS_X_VERSION_10_7) */ + +#endif /* __APPLE__ */ + +#endif /* LIBFDT_ENV_H */ diff --git a/vendor/riscv-isa-sim/fdt/libfdt_internal.h b/vendor/riscv-isa-sim/fdt/libfdt_internal.h new file mode 100644 index 00000000..741eeb31 --- /dev/null +++ b/vendor/riscv-isa-sim/fdt/libfdt_internal.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +#ifndef LIBFDT_INTERNAL_H +#define LIBFDT_INTERNAL_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. 
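The conversion helpers in libfdt_env.h above assemble the big-endian blob representation byte by byte, so they behave identically on little- and big-endian hosts; a round trip through cpu_to_fdt32()/fdt32_to_cpu() is always the identity. A minimal check:

    #include "libfdt_env.h"
    #include <assert.h>

    int main(void)
    {
        uint32_t v = 0x11223344;
        fdt32_t be = cpu_to_fdt32(v);      /* value as it would sit in a blob */
        assert(fdt32_to_cpu(be) == v);     /* round trip is the identity on any host */
        return 0;
    }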
+ */ +#include + +#define FDT_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1)) +#define FDT_TAGALIGN(x) (FDT_ALIGN((x), FDT_TAGSIZE)) + +int fdt_ro_probe_(const void *fdt); +#define FDT_RO_PROBE(fdt) \ + { \ + int totalsize_; \ + if ((totalsize_ = fdt_ro_probe_(fdt)) < 0) \ + return totalsize_; \ + } + +int fdt_check_node_offset_(const void *fdt, int offset); +int fdt_check_prop_offset_(const void *fdt, int offset); +const char *fdt_find_string_(const char *strtab, int tabsize, const char *s); +int fdt_node_end_offset_(void *fdt, int nodeoffset); + +static inline const void *fdt_offset_ptr_(const void *fdt, int offset) +{ + return (const char *)fdt + fdt_off_dt_struct(fdt) + offset; +} + +static inline void *fdt_offset_ptr_w_(void *fdt, int offset) +{ + return (void *)(uintptr_t)fdt_offset_ptr_(fdt, offset); +} + +static inline const struct fdt_reserve_entry *fdt_mem_rsv_(const void *fdt, int n) +{ + const struct fdt_reserve_entry *rsv_table = + (const struct fdt_reserve_entry *) + ((const char *)fdt + fdt_off_mem_rsvmap(fdt)); + + return rsv_table + n; +} +static inline struct fdt_reserve_entry *fdt_mem_rsv_w_(void *fdt, int n) +{ + return (void *)(uintptr_t)fdt_mem_rsv_(fdt, n); +} + +#define FDT_SW_MAGIC (~FDT_MAGIC) + +#endif /* LIBFDT_INTERNAL_H */ diff --git a/vendor/riscv-isa-sim/fesvr/byteorder.h b/vendor/riscv-isa-sim/fesvr/byteorder.h new file mode 100644 index 00000000..2b1dbf98 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/byteorder.h @@ -0,0 +1,94 @@ +// See LICENSE for license details. + +#ifndef _RISCV_BYTEORDER_H +#define _RISCV_BYTEORDER_H + +#include "config.h" +#include + +static inline uint8_t swap(uint8_t n) { return n; } +static inline uint16_t swap(uint16_t n) { return (n >> 8) | (n << 8); } +static inline uint32_t swap(uint32_t n) { return (swap(uint16_t(n)) << 16) | swap(uint16_t(n >> 16)); } +static inline uint64_t swap(uint64_t n) { return (uint64_t(swap(uint32_t(n))) << 32) | swap(uint32_t(n >> 32)); } +static inline int8_t swap(int8_t n) { return n; } +static inline int16_t swap(int16_t n) { return int16_t(swap(uint16_t(n))); } +static inline int32_t swap(int32_t n) { return int32_t(swap(uint32_t(n))); } +static inline int64_t swap(int64_t n) { return int64_t(swap(uint64_t(n))); } + +#ifdef WORDS_BIGENDIAN +template static inline T from_be(T n) { return n; } +template static inline T to_be(T n) { return n; } +template static inline T from_le(T n) { return swap(n); } +template static inline T to_le(T n) { return swap(n); } +#else +template static inline T from_le(T n) { return n; } +template static inline T to_le(T n) { return n; } +template static inline T from_be(T n) { return swap(n); } +template static inline T to_be(T n) { return swap(n); } +#endif + +// Wrapper to mark a value as target endian, to guide conversion code + +template class base_endian { + + protected: + T value; + + base_endian(T n) : value(n) {} + + public: + // Setting to and testing against zero never needs swapping + base_endian() : value(0) {} + bool operator!() { return !value; } + + // Bitwise logic operations can be performed without swapping + base_endian& operator|=(const base_endian& rhs) { value |= rhs.value; return *this; } + base_endian& operator&=(const base_endian& rhs) { value &= rhs.value; return *this; } + base_endian& operator^=(const base_endian& rhs) { value ^= rhs.value; return *this; } + + inline T from_be() { return ::from_be(value); } + inline T from_le() { return ::from_le(value); } +}; + +template class target_endian : public base_endian { + protected: + target_endian(T 
n) : base_endian(n) {} + + public: + target_endian() {} + + static inline target_endian to_be(T n) { return target_endian(::to_be(n)); } + static inline target_endian to_le(T n) { return target_endian(::to_le(n)); } + + // Useful values over which swapping is identity + static const target_endian zero; + static const target_endian all_ones; +}; + +template const target_endian target_endian::zero = target_endian(T(0)); +template const target_endian target_endian::all_ones = target_endian(~T(0)); + + +// Specializations with implicit conversions (no swap information needed) + +template<> class target_endian : public base_endian { + public: + target_endian() {} + target_endian(uint8_t n) : base_endian(n) {} + operator uint8_t() { return value; } + + static inline target_endian to_be(uint8_t n) { return target_endian(n); } + static inline target_endian to_le(uint8_t n) { return target_endian(n); } +}; + +template<> class target_endian : public base_endian { + public: + target_endian() {} + target_endian(int8_t n) : base_endian(n) {} + operator int8_t() { return value; } + + static inline target_endian to_be(int8_t n) { return target_endian(n); } + static inline target_endian to_le(int8_t n) { return target_endian(n); } +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/context.cc b/vendor/riscv-isa-sim/fesvr/context.cc new file mode 100644 index 00000000..ca738137 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/context.cc @@ -0,0 +1,115 @@ +#include "context.h" +#include +#include +#include + +static __thread context_t* cur; + +context_t::context_t() + : creator(NULL), func(NULL), arg(NULL), +#ifndef USE_UCONTEXT + mutex(PTHREAD_MUTEX_INITIALIZER), + cond(PTHREAD_COND_INITIALIZER), flag(0) +#else + context(new ucontext_t) +#endif +{ +} + +#ifdef USE_UCONTEXT +#ifndef GLIBC_64BIT_PTR_BUG +void context_t::wrapper(context_t* ctx) +{ +#else +void context_t::wrapper(unsigned int hi, unsigned int lo) +{ + context_t* ctx = reinterpret_cast(static_cast(lo) | (static_cast(hi) << 32)); +#endif + ctx->creator->switch_to(); + ctx->func(ctx->arg); +} +#else +void* context_t::wrapper(void* a) +{ + context_t* ctx = static_cast(a); + cur = ctx; + ctx->creator->switch_to(); + + ctx->func(ctx->arg); + return NULL; +} +#endif + +void context_t::init(void (*f)(void*), void* a) +{ + func = f; + arg = a; + creator = current(); + +#ifdef USE_UCONTEXT + getcontext(context.get()); + context->uc_link = creator->context.get(); + context->uc_stack.ss_size = 64*1024; + context->uc_stack.ss_sp = new void*[context->uc_stack.ss_size/sizeof(void*)]; +#ifndef GLIBC_64BIT_PTR_BUG + makecontext(context.get(), (void(*)(void))&context_t::wrapper, 1, this); +#else + unsigned int hi(reinterpret_cast(this) >> 32); + unsigned int lo(reinterpret_cast(this)); + makecontext(context.get(), (void(*)(void))&context_t::wrapper, 2, hi, lo); +#endif + switch_to(); +#else + assert(flag == 0); + + pthread_mutex_lock(&creator->mutex); + creator->flag = 0; + if (pthread_create(&thread, NULL, &context_t::wrapper, this) != 0) + abort(); + pthread_detach(thread); + while (!creator->flag) + pthread_cond_wait(&creator->cond, &creator->mutex); + pthread_mutex_unlock(&creator->mutex); +#endif +} + +context_t::~context_t() +{ + assert(this != cur); +} + +void context_t::switch_to() +{ + assert(this != cur); +#ifdef USE_UCONTEXT + context_t* prev = cur; + cur = this; + if (swapcontext(prev->context.get(), context.get()) != 0) + abort(); +#else + cur->flag = 0; + this->flag = 1; + pthread_mutex_lock(&this->mutex); + pthread_cond_signal(&this->cond); + 
pthread_mutex_unlock(&this->mutex); + pthread_mutex_lock(&cur->mutex); + while (!cur->flag) + pthread_cond_wait(&cur->cond, &cur->mutex); + pthread_mutex_unlock(&cur->mutex); +#endif +} + +context_t* context_t::current() +{ + if (cur == NULL) + { + cur = new context_t; +#ifdef USE_UCONTEXT + getcontext(cur->context.get()); +#else + cur->thread = pthread_self(); + cur->flag = 1; +#endif + } + return cur; +} diff --git a/vendor/riscv-isa-sim/fesvr/context.h b/vendor/riscv-isa-sim/fesvr/context.h new file mode 100644 index 00000000..18bf50ef --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/context.h @@ -0,0 +1,54 @@ +#ifndef _HTIF_CONTEXT_H +#define _HTIF_CONTEXT_H + +// A replacement for ucontext.h, which is sadly deprecated. + +#include + +#if defined(__GLIBC__) +# undef USE_UCONTEXT +# define USE_UCONTEXT +# include +# include +#include + +#if (ULONG_MAX > UINT_MAX) // 64-bit systems only +#if (100*GLIB_MAJOR_VERSION+GLIB_MINOR_VERSION < 208) +#define GLIBC_64BIT_PTR_BUG +static_assert (sizeof(unsigned int) == 4, "uint size doesn't match expected 32bit"); +static_assert (sizeof(unsigned long) == 8, "ulong size doesn't match expected 64bit"); +static_assert (sizeof(void*) == 8, "ptr size doesn't match expected 64bit"); +#endif +#endif /* ULONG_MAX > UINT_MAX */ + +#endif + +class context_t +{ + public: + context_t(); + ~context_t(); + void init(void (*func)(void*), void* arg); + void switch_to(); + static context_t* current(); + private: + context_t* creator; + void (*func)(void*); + void* arg; +#ifdef USE_UCONTEXT + std::unique_ptr context; +#ifndef GLIBC_64BIT_PTR_BUG + static void wrapper(context_t*); +#else + static void wrapper(unsigned int, unsigned int); +#endif +#else + pthread_t thread; + pthread_mutex_t mutex; + pthread_cond_t cond; + volatile int flag; + static void* wrapper(void*); +#endif +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/debug_defines.h b/vendor/riscv-isa-sim/fesvr/debug_defines.h new file mode 100644 index 00000000..e5f92910 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/debug_defines.h @@ -0,0 +1,1418 @@ +#define DTM_IDCODE 0x01 +/* +* Identifies the release version of this part. + */ +#define DTM_IDCODE_VERSION_OFFSET 28 +#define DTM_IDCODE_VERSION_LENGTH 4 +#define DTM_IDCODE_VERSION (0xf << DTM_IDCODE_VERSION_OFFSET) +/* +* Identifies the designer's part number of this part. + */ +#define DTM_IDCODE_PARTNUMBER_OFFSET 12 +#define DTM_IDCODE_PARTNUMBER_LENGTH 16 +#define DTM_IDCODE_PARTNUMBER (0xffff << DTM_IDCODE_PARTNUMBER_OFFSET) +/* +* Identifies the designer/manufacturer of this part. Bits 6:0 must be +* bits 6:0 of the designer/manufacturer's Identification Code as +* assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16 +* count of the number of continuation characters (0x7f) in that same +* Identification Code. + */ +#define DTM_IDCODE_MANUFID_OFFSET 1 +#define DTM_IDCODE_MANUFID_LENGTH 11 +#define DTM_IDCODE_MANUFID (0x7ff << DTM_IDCODE_MANUFID_OFFSET) +#define DTM_IDCODE_1_OFFSET 0 +#define DTM_IDCODE_1_LENGTH 1 +#define DTM_IDCODE_1 (0x1 << DTM_IDCODE_1_OFFSET) +#define DTM_DTMCS 0x10 +/* +* Writing 1 to this bit does a hard reset of the DTM, +* causing the DTM to forget about any outstanding DMI transactions. +* In general this should only be used when the Debugger has +* reason to expect that the outstanding DMI transaction will never +* complete (e.g. a reset condition caused an inflight DMI transaction to +* be cancelled). 
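A sketch of the hand-off pattern context_t (context.h above) provides and that dtm_t relies on: init() sets up the new context and immediately switches back to its creator, so the guest body only starts running on the next switch_to(). The function name and messages here are made up:

    #include "context.h"
    #include <cstdio>

    static context_t guest_ctx;

    static void guest(void* arg)
    {
      context_t* creator = static_cast<context_t*>(arg);
      std::printf("guest: running\n");
      creator->switch_to();   // yield back to the creator; not resumed in this sketch
    }

    int main()
    {
      context_t* main_ctx = context_t::current();
      guest_ctx.init(guest, main_ctx);   // wrapper switches back before guest() runs
      guest_ctx.switch_to();             // now guest() runs until it yields
      std::printf("main: guest yielded\n");
      return 0;
    }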
+ */ +#define DTM_DTMCS_DMIHARDRESET_OFFSET 17 +#define DTM_DTMCS_DMIHARDRESET_LENGTH 1 +#define DTM_DTMCS_DMIHARDRESET (0x1 << DTM_DTMCS_DMIHARDRESET_OFFSET) +/* +* Writing 1 to this bit clears the sticky error state +* and allows the DTM to retry or complete the previous +* transaction. + */ +#define DTM_DTMCS_DMIRESET_OFFSET 16 +#define DTM_DTMCS_DMIRESET_LENGTH 1 +#define DTM_DTMCS_DMIRESET (0x1 << DTM_DTMCS_DMIRESET_OFFSET) +/* +* This is a hint to the debugger of the minimum number of +* cycles a debugger should spend in +* Run-Test/Idle after every DMI scan to avoid a `busy' +* return code (\Fdmistat of 3). A debugger must still +* check \Fdmistat when necessary. +* +* 0: It is not necessary to enter Run-Test/Idle at all. +* +* 1: Enter Run-Test/Idle and leave it immediately. +* +* 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving. +* +* And so on. + */ +#define DTM_DTMCS_IDLE_OFFSET 12 +#define DTM_DTMCS_IDLE_LENGTH 3 +#define DTM_DTMCS_IDLE (0x7 << DTM_DTMCS_IDLE_OFFSET) +/* +* 0: No error. +* +* 1: Reserved. Interpret the same as 2. +* +* 2: An operation failed (resulted in \Fop of 2). +* +* 3: An operation was attempted while a DMI access was still in +* progress (resulted in \Fop of 3). + */ +#define DTM_DTMCS_DMISTAT_OFFSET 10 +#define DTM_DTMCS_DMISTAT_LENGTH 2 +#define DTM_DTMCS_DMISTAT (0x3 << DTM_DTMCS_DMISTAT_OFFSET) +/* +* The size of \Faddress in \Rdmi. + */ +#define DTM_DTMCS_ABITS_OFFSET 4 +#define DTM_DTMCS_ABITS_LENGTH 6 +#define DTM_DTMCS_ABITS (0x3f << DTM_DTMCS_ABITS_OFFSET) +/* +* 0: Version described in spec version 0.11. +* +* 1: Version described in spec version 0.13 (and later?), which +* reduces the DMI data width to 32 bits. +* +* Other values are reserved for future use. + */ +#define DTM_DTMCS_VERSION_OFFSET 0 +#define DTM_DTMCS_VERSION_LENGTH 4 +#define DTM_DTMCS_VERSION (0xf << DTM_DTMCS_VERSION_OFFSET) +#define DTM_DMI 0x11 +/* +* Address used for DMI access. In Update-DR this value is used +* to access the DM over the DMI. 
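Field accessors for dtmcs follow directly from the *_OFFSET and mask macros above; hypothetical helpers for the abits and idle-hint fields:

    #include "debug_defines.h"
    #include <stdint.h>

    static inline unsigned dtmcs_abits(uint32_t dtmcs)
    {
      return (dtmcs & DTM_DTMCS_ABITS) >> DTM_DTMCS_ABITS_OFFSET;
    }

    static inline unsigned dtmcs_idle_hint(uint32_t dtmcs)
    {
      return (dtmcs & DTM_DTMCS_IDLE) >> DTM_DTMCS_IDLE_OFFSET;
    }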
+ */ +#define DTM_DMI_ADDRESS_OFFSET 34 +#define DTM_DMI_ADDRESS_LENGTH abits +#define DTM_DMI_ADDRESS (((1L< +#include +#include +#include +#include +#include +#include +#include +using namespace std::placeholders; + +device_t::device_t() + : command_handlers(command_t::MAX_COMMANDS), + command_names(command_t::MAX_COMMANDS) +{ + for (size_t cmd = 0; cmd < command_t::MAX_COMMANDS; cmd++) + register_command(cmd, std::bind(&device_t::handle_null_command, this, _1), ""); + register_command(command_t::MAX_COMMANDS-1, std::bind(&device_t::handle_identify, this, _1), "identity"); +} + +void device_t::register_command(size_t cmd, command_func_t handler, const char* name) +{ + assert(cmd < command_t::MAX_COMMANDS); + assert(strlen(name) < IDENTITY_SIZE); + command_handlers[cmd] = handler; + command_names[cmd] = name; +} + +void device_t::handle_command(command_t cmd) +{ + command_handlers[cmd.cmd()](cmd); +} + +void device_t::handle_null_command(command_t cmd) +{ +} + +void device_t::handle_identify(command_t cmd) +{ + size_t what = cmd.payload() % command_t::MAX_COMMANDS; + uint64_t addr = cmd.payload() / command_t::MAX_COMMANDS; + + char id[IDENTITY_SIZE] = {0}; + if (what == command_t::MAX_COMMANDS-1) + { + assert(strlen(identity()) < IDENTITY_SIZE); + strcpy(id, identity()); + } + else + strcpy(id, command_names[what].c_str()); + + cmd.memif().write(addr, IDENTITY_SIZE, id); + cmd.respond(1); +} + +bcd_t::bcd_t() +{ + register_command(0, std::bind(&bcd_t::handle_read, this, _1), "read"); + register_command(1, std::bind(&bcd_t::handle_write, this, _1), "write"); +} + +void bcd_t::handle_read(command_t cmd) +{ + pending_reads.push(cmd); +} + +void bcd_t::handle_write(command_t cmd) +{ + canonical_terminal_t::write(cmd.payload()); +} + +void bcd_t::tick() +{ + int ch; + if (!pending_reads.empty() && (ch = canonical_terminal_t::read()) != -1) + { + pending_reads.front().respond(0x100 | ch); + pending_reads.pop(); + } +} + +disk_t::disk_t(const char* fn) +{ + fd = ::open(fn, O_RDWR); + if (fd < 0) + throw std::runtime_error("could not open " + std::string(fn)); + + register_command(0, std::bind(&disk_t::handle_read, this, _1), "read"); + register_command(1, std::bind(&disk_t::handle_write, this, _1), "write"); + + struct stat st; + if (fstat(fd, &st) < 0) + throw std::runtime_error("could not stat " + std::string(fn)); + + size = st.st_size; + id = "disk size=" + std::to_string(size); +} + +disk_t::~disk_t() +{ + close(fd); +} + +void disk_t::handle_read(command_t cmd) +{ + request_t req; + cmd.memif().read(cmd.payload(), sizeof(req), &req); + + std::vector buf(req.size); + if ((size_t)::pread(fd, buf.data(), buf.size(), req.offset) != req.size) + throw std::runtime_error("could not read " + id + " @ " + std::to_string(req.offset)); + + cmd.memif().write(req.addr, buf.size(), buf.data()); + cmd.respond(req.tag); +} + +void disk_t::handle_write(command_t cmd) +{ + request_t req; + cmd.memif().read(cmd.payload(), sizeof(req), &req); + + std::vector buf(req.size); + cmd.memif().read(req.addr, buf.size(), buf.data()); + + if ((size_t)::pwrite(fd, buf.data(), buf.size(), req.offset) != req.size) + throw std::runtime_error("could not write " + id + " @ " + std::to_string(req.offset)); + + cmd.respond(req.tag); +} + +device_list_t::device_list_t() + : devices(command_t::MAX_COMMANDS, &null_device), num_devices(0) +{ +} + +void device_list_t::register_device(device_t* dev) +{ + num_devices++; + assert(num_devices < command_t::MAX_DEVICES); + devices[num_devices-1] = dev; +} + +void 
device_list_t::handle_command(command_t cmd) +{ + devices[cmd.device()]->handle_command(cmd); +} + +void device_list_t::tick() +{ + for (size_t i = 0; i < num_devices; i++) + devices[i]->tick(); +} diff --git a/vendor/riscv-isa-sim/fesvr/device.h b/vendor/riscv-isa-sim/fesvr/device.h new file mode 100644 index 00000000..1387b745 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/device.h @@ -0,0 +1,118 @@ +#ifndef _DEVICE_H +#define _DEVICE_H + +#include +#include +#include +#include +#include + +class memif_t; + +class command_t +{ + public: + typedef std::function callback_t; + command_t(memif_t& memif, uint64_t tohost, callback_t cb) + : _memif(memif), tohost(tohost), cb(cb) {} + + memif_t& memif() { return _memif; } + uint8_t device() { return tohost >> 56; } + uint8_t cmd() { return tohost >> 48; } + uint64_t payload() { return tohost << 16 >> 16; } + void respond(uint64_t resp) { cb((tohost >> 48 << 48) | (resp << 16 >> 16)); } + + static const size_t MAX_COMMANDS = 256; + static const size_t MAX_DEVICES = 256; + + private: + memif_t& _memif; + uint64_t tohost; + callback_t cb; +}; + +class device_t +{ + public: + device_t(); + virtual ~device_t() {} + virtual const char* identity() = 0; + virtual void tick() {} + + void handle_command(command_t cmd); + + protected: + typedef std::function command_func_t; + void register_command(size_t, command_func_t, const char*); + + private: + device_t& operator = (const device_t&); // disallow + device_t(const device_t&); // disallow + + static const size_t IDENTITY_SIZE = 64; + void handle_null_command(command_t cmd); + void handle_identify(command_t cmd); + + std::vector command_handlers; + std::vector command_names; +}; + +class bcd_t : public device_t +{ + public: + bcd_t(); + const char* identity() { return "bcd"; } + void tick(); + + private: + void handle_read(command_t cmd); + void handle_write(command_t cmd); + + std::queue pending_reads; +}; + +class disk_t : public device_t +{ + public: + disk_t(const char* fn); + ~disk_t(); + const char* identity() { return id.c_str(); } + + private: + struct request_t + { + uint64_t addr; + uint64_t offset; + uint64_t size; + uint64_t tag; + }; + + void handle_read(command_t cmd); + void handle_write(command_t cmd); + + std::string id; + size_t size; + int fd; +}; + +class null_device_t : public device_t +{ + public: + const char* identity() { return ""; } +}; + +class device_list_t +{ + public: + device_list_t(); + void register_device(device_t* dev); + void handle_command(command_t cmd); + void tick(); + + private: + std::vector devices; + null_device_t null_device; + size_t num_devices; +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/dtm.cc b/vendor/riscv-isa-sim/fesvr/dtm.cc new file mode 100644 index 00000000..b5de14c0 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/dtm.cc @@ -0,0 +1,644 @@ +#include "dtm.h" +#include "debug_defines.h" +#include +#include +#include +#include +#include +#include + +#define RV_X(x, s, n) \ + (((x) >> (s)) & ((1 << (n)) - 1)) +#define ENCODE_ITYPE_IMM(x) \ + (RV_X(x, 0, 12) << 20) +#define ENCODE_STYPE_IMM(x) \ + ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25)) +#define ENCODE_SBTYPE_IMM(x) \ + ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31)) +#define ENCODE_UTYPE_IMM(x) \ + (RV_X(x, 12, 20) << 12) +#define ENCODE_UJTYPE_IMM(x) \ + ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31)) + +#define LOAD(xlen, dst, base, imm) \ + (((xlen) == 64 ? 
0x00003003 : 0x00002003) \ + | ((dst) << 7) | ((base) << 15) | (uint32_t)ENCODE_ITYPE_IMM(imm)) +#define STORE(xlen, src, base, imm) \ + (((xlen) == 64 ? 0x00003023 : 0x00002023) \ + | ((src) << 20) | ((base) << 15) | (uint32_t)ENCODE_STYPE_IMM(imm)) +#define JUMP(there, here) (0x6f | (uint32_t)ENCODE_UJTYPE_IMM((there) - (here))) +#define BNE(r1, r2, there, here) (0x1063 | ((r1) << 15) | ((r2) << 20) | (uint32_t)ENCODE_SBTYPE_IMM((there) - (here))) +#define ADDI(dst, src, imm) (0x13 | ((dst) << 7) | ((src) << 15) | (uint32_t)ENCODE_ITYPE_IMM(imm)) +#define SRL(dst, src, sh) (0x5033 | ((dst) << 7) | ((src) << 15) | ((sh) << 20)) +#define FENCE_I 0x100f +#define EBREAK 0x00100073 +#define X0 0 +#define S0 8 +#define S1 9 + +#define AC_AR_REGNO(x) ((0x1000 | x) << AC_ACCESS_REGISTER_REGNO_OFFSET) +#define AC_AR_SIZE(x) (((x == 128)? 4 : (x == 64 ? 3 : 2)) << AC_ACCESS_REGISTER_SIZE_OFFSET) + +#define WRITE 1 +#define SET 2 +#define CLEAR 3 +#define CSRRx(type, dst, csr, src) (0x73 | ((type) << 12) | ((dst) << 7) | ((src) << 15) | (uint32_t)((csr) << 20)) + +#define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1))) +#define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask))) + +#define RUN_AC_OR_DIE(a, b, c, d, e) { \ + uint32_t cmderr = run_abstract_command(a, b, c, d, e); \ + if (cmderr) { \ + die(cmderr); \ + } \ + } + +uint32_t dtm_t::do_command(dtm_t::req r) +{ + req_buf = r; + target->switch_to(); + assert(resp_buf.resp == 0); + return resp_buf.data; +} + +uint32_t dtm_t::read(uint32_t addr) +{ + return do_command((req){addr, 1, 0}); +} + +uint32_t dtm_t::write(uint32_t addr, uint32_t data) +{ + return do_command((req){addr, 2, data}); +} + +void dtm_t::nop() +{ + do_command((req){0, 0, 0}); +} + +void dtm_t::select_hart(int hartsel) { + int dmcontrol = read(DMI_DMCONTROL); + write (DMI_DMCONTROL, set_field(dmcontrol, DMI_DMCONTROL_HARTSEL, hartsel)); + current_hart = hartsel; +} + +int dtm_t::enumerate_harts() { + int max_hart = (1 << DMI_DMCONTROL_HARTSEL_LENGTH) - 1; + write(DMI_DMCONTROL, set_field(read(DMI_DMCONTROL), DMI_DMCONTROL_HARTSEL, max_hart)); + read(DMI_DMSTATUS); + max_hart = get_field(read(DMI_DMCONTROL), DMI_DMCONTROL_HARTSEL); + + int hartsel; + for (hartsel = 0; hartsel <= max_hart; hartsel++) { + select_hart(hartsel); + int dmstatus = read(DMI_DMSTATUS); + if (get_field(dmstatus, DMI_DMSTATUS_ANYNONEXISTENT)) + break; + } + return hartsel; +} + +void dtm_t::halt(int hartsel) +{ + if (running) { + write(DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE); + // Read dmstatus to avoid back-to-back writes to dmcontrol. + read(DMI_DMSTATUS); + } + + int dmcontrol = DMI_DMCONTROL_HALTREQ | DMI_DMCONTROL_DMACTIVE; + dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HARTSEL, hartsel); + write(DMI_DMCONTROL, dmcontrol); + int dmstatus; + do { + dmstatus = read(DMI_DMSTATUS); + } while(get_field(dmstatus, DMI_DMSTATUS_ALLHALTED) == 0); + dmcontrol &= ~DMI_DMCONTROL_HALTREQ; + write(DMI_DMCONTROL, dmcontrol); + // Read dmstatus to avoid back-to-back writes to dmcontrol. 
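The do_command()/read()/write() helpers above ultimately move HTIF tohost words whose layout is defined by command_t in device.h: device in bits 63:56, command in bits 55:48, payload in bits 47:0. A small check of that split, with arbitrary field values:

    #include <cstdint>
    #include <cassert>

    int main()
    {
      uint64_t device = 1, cmd = 0, payload = 0x1234;
      uint64_t tohost = (device << 56) | (cmd << 48) | payload;

      assert(uint8_t(tohost >> 56) == device);    // command_t::device()
      assert(uint8_t(tohost >> 48) == cmd);       // command_t::cmd()
      assert((tohost << 16 >> 16) == payload);    // command_t::payload()
      return 0;
    }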
+ read(DMI_DMSTATUS); + current_hart = hartsel; +} + +void dtm_t::resume(int hartsel) +{ + int dmcontrol = DMI_DMCONTROL_RESUMEREQ | DMI_DMCONTROL_DMACTIVE; + dmcontrol = set_field(dmcontrol, DMI_DMCONTROL_HARTSEL, hartsel); + write(DMI_DMCONTROL, dmcontrol); + int dmstatus; + do { + dmstatus = read(DMI_DMSTATUS); + } while (get_field(dmstatus, DMI_DMSTATUS_ALLRESUMEACK) == 0); + dmcontrol &= ~DMI_DMCONTROL_RESUMEREQ; + write(DMI_DMCONTROL, dmcontrol); + // Read dmstatus to avoid back-to-back writes to dmcontrol. + read(DMI_DMSTATUS); + current_hart = hartsel; + + if (running) { + write(DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE); + // Read dmstatus to avoid back-to-back writes to dmcontrol. + read(DMI_DMSTATUS); + } +} + +uint64_t dtm_t::save_reg(unsigned regno) +{ + uint32_t data[xlen/(8*4)]; + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | AC_AR_SIZE(xlen) | AC_AR_REGNO(regno); + RUN_AC_OR_DIE(command, 0, 0, data, xlen / (8*4)); + + uint64_t result = data[0]; + if (xlen > 32) { + result |= ((uint64_t)data[1]) << 32; + } + return result; +} + +void dtm_t::restore_reg(unsigned regno, uint64_t val) +{ + uint32_t data[xlen/(8*4)]; + data[0] = (uint32_t) val; + if (xlen > 32) { + data[1] = (uint32_t) (val >> 32); + } + + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(regno); + + RUN_AC_OR_DIE(command, 0, 0, data, xlen / (8*4)); + +} + +uint32_t dtm_t::run_abstract_command(uint32_t command, + const uint32_t program[], size_t program_n, + uint32_t data[], size_t data_n) +{ + assert(program_n <= ram_words); + assert(data_n <= data_words); + + for (size_t i = 0; i < program_n; i++) { + write(DMI_PROGBUF0 + i, program[i]); + } + + if (get_field(command, AC_ACCESS_REGISTER_WRITE) && + get_field(command, AC_ACCESS_REGISTER_TRANSFER)) { + for (size_t i = 0; i < data_n; i++) { + write(DMI_DATA0 + i, data[i]); + } + } + + write(DMI_COMMAND, command); + + // Wait for not busy and then check for error. + uint32_t abstractcs; + do { + abstractcs = read(DMI_ABSTRACTCS); + } while (abstractcs & DMI_ABSTRACTCS_BUSY); + + if ((get_field(command, AC_ACCESS_REGISTER_WRITE) == 0) && + get_field(command, AC_ACCESS_REGISTER_TRANSFER)) { + for (size_t i = 0; i < data_n; i++){ + data[i] = read(DMI_DATA0 + i); + } + } + + return get_field(abstractcs, DMI_ABSTRACTCS_CMDERR); + +} + +size_t dtm_t::chunk_align() +{ + return xlen / 8; +} + +void dtm_t::read_chunk(uint64_t taddr, size_t len, void* dst) +{ + uint32_t prog[ram_words]; + uint32_t data[data_words]; + + uint8_t * curr = (uint8_t*) dst; + + halt(current_hart); + + uint64_t s0 = save_reg(S0); + uint64_t s1 = save_reg(S1); + + prog[0] = LOAD(xlen, S1, S0, 0); + prog[1] = ADDI(S0, S0, xlen/8); + prog[2] = EBREAK; + + data[0] = (uint32_t) taddr; + if (xlen > 32) { + data[1] = (uint32_t) (taddr >> 32); + } + + // Write s0 with the address, then execute program buffer. + // This will get S1 with the data and increment s0. + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_ACCESS_REGISTER_POSTEXEC | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S0); + + RUN_AC_OR_DIE(command, prog, 3, data, xlen/(4*8)); + + // TODO: could use autoexec here. 
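The LOAD/STORE/ADDI encoder macros near the top of this file can be cross-checked against independently assembled RV64 instructions; a compile-time sketch, assuming those macros and the S0/S1 register numbers are in scope:

    // addi s0, s0, 8  /  ld s1, 0(s0)  /  sd s1, 0(s0)
    static_assert(ADDI(S0, S0, 8)      == 0x00840413u, "addi s0, s0, 8");
    static_assert(LOAD(64, S1, S0, 0)  == 0x00043483u, "ld s1, 0(s0)");
    static_assert(STORE(64, S1, S0, 0) == 0x00943023u, "sd s1, 0(s0)");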
+ for (size_t i = 0; i < (len * 8 / xlen); i++){ + command = AC_ACCESS_REGISTER_TRANSFER | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S1); + if ((i + 1) < (len * 8 / xlen)) { + command |= AC_ACCESS_REGISTER_POSTEXEC; + } + + RUN_AC_OR_DIE(command, 0, 0, data, xlen/(4*8)); + + memcpy(curr, data, xlen/8); + curr += xlen/8; + } + + restore_reg(S0, s0); + restore_reg(S1, s1); + + resume(current_hart); + +} + +void dtm_t::write_chunk(uint64_t taddr, size_t len, const void* src) +{ + uint32_t prog[ram_words]; + uint32_t data[data_words]; + + const uint8_t * curr = (const uint8_t*) src; + + halt(current_hart); + + uint64_t s0 = save_reg(S0); + uint64_t s1 = save_reg(S1); + + prog[0] = STORE(xlen, S1, S0, 0); + prog[1] = ADDI(S0, S0, xlen/8); + prog[2] = EBREAK; + + data[0] = (uint32_t) taddr; + if (xlen > 32) { + data[1] = (uint32_t) (taddr >> 32); + } + + // Write the program (not used yet). + // Write s0 with the address. + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S0); + + RUN_AC_OR_DIE(command, prog, 3, data, xlen/(4*8)); + + // Use Autoexec for more than one word of transfer. + // Write S1 with data, then execution stores S1 to + // 0(S0) and increments S0. + // Each time we write XLEN bits. + memcpy(data, curr, xlen/8); + curr += xlen/8; + + command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_POSTEXEC | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S1); + + RUN_AC_OR_DIE(command, 0, 0, data, xlen/(4*8)); + + uint32_t abstractcs; + for (size_t i = 1; i < (len * 8 / xlen); i++){ + if (i == 1) { + write(DMI_ABSTRACTAUTO, 1 << DMI_ABSTRACTAUTO_AUTOEXECDATA_OFFSET); + } + memcpy(data, curr, xlen/8); + curr += xlen/8; + if (xlen == 64) { + write(DMI_DATA0 + 1, data[1]); + } + write(DMI_DATA0, data[0]); //Triggers a command w/ autoexec. 
+ + do { + abstractcs = read(DMI_ABSTRACTCS); + } while (abstractcs & DMI_ABSTRACTCS_BUSY); + if ( get_field(abstractcs, DMI_ABSTRACTCS_CMDERR)) { + die(get_field(abstractcs, DMI_ABSTRACTCS_CMDERR)); + } + } + if ((len * 8 / xlen) > 1) { + write(DMI_ABSTRACTAUTO, 0); + } + + restore_reg(S0, s0); + restore_reg(S1, s1); + resume(current_hart); +} + +void dtm_t::die(uint32_t cmderr) +{ + const char * codes[] = { + "OK", + "BUSY", + "NOT_SUPPORTED", + "EXCEPTION", + "HALT/RESUME" + }; + const char * msg; + if (cmderr < (sizeof(codes) / sizeof(*codes))){ + msg = codes[cmderr]; + } else { + msg = "OTHER"; + } + //throw std::runtime_error("Debug Abstract Command Error #" + std::to_string(cmderr) + "(" + msg + ")"); + printf("ERROR: %s:%d, Debug Abstract Command Error #%d (%s)", __FILE__, __LINE__, cmderr, msg); + printf("ERROR: %s:%d, Should die, but allowing simulation to continue and fail.", __FILE__, __LINE__); + write(DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR); + +} + +void dtm_t::clear_chunk(uint64_t taddr, size_t len) +{ + uint32_t prog[ram_words]; + uint32_t data[data_words]; + + halt(current_hart); + uint64_t s0 = save_reg(S0); + uint64_t s1 = save_reg(S1); + + uint32_t command; + + // S0 = Addr + data[0] = (uint32_t) taddr; + data[1] = (uint32_t) (taddr >> 32); + command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S0); + RUN_AC_OR_DIE(command, 0, 0, data, xlen/(4*8)); + + // S1 = Addr + len, loop until S0 = S1 + prog[0] = STORE(xlen, X0, S0, 0); + prog[1] = ADDI(S0, S0, xlen/8); + prog[2] = BNE(S0, S1, 0*4, 2*4); + prog[3] = EBREAK; + + data[0] = (uint32_t) (taddr + len); + data[1] = (uint32_t) ((taddr + len) >> 32); + command = AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(S1) | + AC_ACCESS_REGISTER_POSTEXEC; + RUN_AC_OR_DIE(command, prog, 4, data, xlen/(4*8)); + + restore_reg(S0, s0); + restore_reg(S1, s1); + + resume(current_hart); +} + +uint64_t dtm_t::write_csr(unsigned which, uint64_t data) +{ + return modify_csr(which, data, WRITE); +} + +uint64_t dtm_t::set_csr(unsigned which, uint64_t data) +{ + return modify_csr(which, data, SET); +} + +uint64_t dtm_t::clear_csr(unsigned which, uint64_t data) +{ + return modify_csr(which, data, CLEAR); +} + +uint64_t dtm_t::read_csr(unsigned which) +{ + return set_csr(which, 0); +} + +uint64_t dtm_t::modify_csr(unsigned which, uint64_t data, uint32_t type) +{ + halt(current_hart); + + // This code just uses DSCRATCH to save S0 + // and data_base to do the transfer so we don't + // need to run more commands to save and restore + // S0. + uint32_t prog[] = { + CSRRx(WRITE, S0, CSR_DSCRATCH0, S0), + LOAD(xlen, S0, X0, data_base), + CSRRx(type, S0, which, S0), + STORE(xlen, S0, X0, data_base), + CSRRx(WRITE, S0, CSR_DSCRATCH0, S0), + EBREAK + }; + + //TODO: Use transfer = 0. For now both HW and OpenOCD + // ignore transfer bit, so use "store to X0" NOOP. + // We sort of need this anyway because run_abstract_command + // needs the DATA to be written so may as well use the WRITE flag. 
+ + uint32_t adata[] = {(uint32_t) data, + (uint32_t) (data >> 32)}; + + uint32_t command = AC_ACCESS_REGISTER_POSTEXEC | + AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(X0); + + RUN_AC_OR_DIE(command, prog, sizeof(prog) / sizeof(*prog), adata, xlen/(4*8)); + + uint64_t res = read(DMI_DATA0);//adata[0]; + if (xlen == 64) + res |= read(DMI_DATA0 + 1);//((uint64_t) adata[1]) << 32; + + resume(current_hart); + return res; +} + +size_t dtm_t::chunk_max_size() +{ + // Arbitrary choice. 4k Page size seems reasonable. + return 4096; +} + +uint32_t dtm_t::get_xlen() +{ + // Attempt to read S0 to find out what size it is. + // You could also attempt to run code, but you need to save registers + // to do that anyway. If what you really want to do is figure out + // the size of S0 so you can save it later, then do that. + uint32_t command = AC_ACCESS_REGISTER_TRANSFER | AC_AR_REGNO(S0); + uint32_t cmderr; + + const uint32_t prog[] = {}; + uint32_t data[] = {}; + + cmderr = run_abstract_command(command | AC_AR_SIZE(128), prog, 0, data, 0); + if (cmderr == 0){ + throw std::runtime_error("FESVR DTM Does not support 128-bit"); + abort(); + return 128; + } + write(DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR); + + cmderr = run_abstract_command(command | AC_AR_SIZE(64), prog, 0, data, 0); + if (cmderr == 0){ + return 64; + } + write(DMI_ABSTRACTCS, DMI_ABSTRACTCS_CMDERR); + + cmderr = run_abstract_command(command | AC_AR_SIZE(32), prog, 0, data, 0); + if (cmderr == 0){ + return 32; + } + + throw std::runtime_error("FESVR DTM can't determine XLEN. Aborting"); +} + +void dtm_t::fence_i() +{ + halt(current_hart); + + const uint32_t prog[] = { + FENCE_I, + EBREAK + }; + + //TODO: Use the transfer = 0. + uint32_t command = AC_ACCESS_REGISTER_POSTEXEC | + AC_ACCESS_REGISTER_TRANSFER | + AC_ACCESS_REGISTER_WRITE | + AC_AR_SIZE(xlen) | + AC_AR_REGNO(X0); + + RUN_AC_OR_DIE(command, prog, sizeof(prog)/sizeof(*prog), 0, 0); + + resume(current_hart); + +} + +void host_thread_main(void* arg) +{ + ((dtm_t*)arg)->producer_thread(); +} + +void dtm_t::reset() +{ + for (int hartsel = 0; hartsel < num_harts; hartsel ++ ){ + select_hart(hartsel); + // this command also does a halt and resume + fence_i(); + // after this command, the hart will run from _start. + write_csr(0x7b1, get_entry_point()); + } + // In theory any hart can handle the memory accesses, + // this will enforce that hart 0 handles them. + select_hart(0); + read(DMI_DMSTATUS); +} + +void dtm_t::idle() +{ + for (int idle_cycles = 0; idle_cycles < max_idle_cycles; idle_cycles++) + nop(); +} + +void dtm_t::producer_thread() +{ + // Learn about the Debug Module and assert things we + // depend on in this code. + + // Enable the debugger. + write(DMI_DMCONTROL, DMI_DMCONTROL_DMACTIVE); + // Poll until the debugger agrees it's enabled. + while ((read(DMI_DMCONTROL) & DMI_DMCONTROL_DMACTIVE) == 0) ; + + // These are checked every time we run an abstract command. + uint32_t abstractcs = read(DMI_ABSTRACTCS); + ram_words = get_field(abstractcs, DMI_ABSTRACTCS_PROGSIZE); + data_words = get_field(abstractcs, DMI_ABSTRACTCS_DATACOUNT); + + // These things are only needed for the 'modify_csr' function. + // That could be re-written to not use these at some performance + // overhead. 
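halt(), resume() and producer_thread() lean heavily on the get_field()/set_field() helpers, which divide or multiply by the mask's lowest set bit to shift a field value out of or into position. A standalone demonstration with a hypothetical 10-bit field (macro bodies copied from earlier in this file):

    #include <cstdint>
    #include <cassert>

    #define get_field(reg, mask) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))
    #define set_field(reg, mask, val) (((reg) & ~(mask)) | (((val) * ((mask) & ~((mask) << 1))) & (mask)))

    int main()
    {
      const uint32_t FIELD = 0x03ff0000u;          // hypothetical 10-bit field at bit 16
      uint32_t reg = set_field(0u, FIELD, 5u);     // places 5 into bits 25:16
      assert(reg == 0x00050000u);
      assert(get_field(reg, FIELD) == 5u);
      return 0;
    }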
+ uint32_t hartinfo = read(DMI_HARTINFO); + assert(get_field(hartinfo, DMI_HARTINFO_NSCRATCH) > 0); + assert(get_field(hartinfo, DMI_HARTINFO_DATAACCESS)); + + data_base = get_field(hartinfo, DMI_HARTINFO_DATAADDR); + + num_harts = enumerate_harts(); + halt(0); + // Note: We don't support systems with heterogeneous XLEN. + // It's possible to do this at the cost of extra cycles. + xlen = get_xlen(); + resume(0); + + running = true; + + htif_t::run(); + + while (true) + nop(); +} + +void dtm_t::start_host_thread() +{ + req_wait = false; + resp_wait = false; + + target = context_t::current(); + host.init(host_thread_main, this); + host.switch_to(); +} + +dtm_t::dtm_t(int argc, char** argv) + : htif_t(argc, argv), running(false) +{ + start_host_thread(); +} + +dtm_t::~dtm_t() +{ +} + +void dtm_t::tick( + bool req_ready, + bool resp_valid, + resp resp_bits) +{ + if (!resp_wait) { + if (!req_wait) { + req_wait = true; + } else if (req_ready) { + req_wait = false; + resp_wait = true; + } + } + + if (resp_valid) { + assert(resp_wait); + resp_wait = false; + + resp_buf = resp_bits; + // update the target with the current context + target = context_t::current(); + host.switch_to(); + } +} + +void dtm_t::return_resp(resp resp_bits){ + resp_buf = resp_bits; + target = context_t::current(); + host.switch_to(); +} diff --git a/vendor/riscv-isa-sim/fesvr/dtm.h b/vendor/riscv-isa-sim/fesvr/dtm.h new file mode 100644 index 00000000..fbf161ef --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/dtm.h @@ -0,0 +1,115 @@ +#ifndef _ROCKET_DTM_H +#define _ROCKET_DTM_H + +#include "htif.h" +#include "context.h" +#include +#include +#include +#include +#include +#include + +// abstract debug transport module +class dtm_t : public htif_t +{ + public: + dtm_t(int argc, char**argv); + ~dtm_t(); + + struct req { + uint32_t addr; + uint32_t op; + uint32_t data; + }; + + struct resp { + uint32_t resp; + uint32_t data; + }; + + void tick( + bool req_ready, + bool resp_valid, + resp resp_bits + ); + // Akin to tick, but the target thread returns a response on every invocation + void return_resp( + resp resp_bits + ); + + + bool req_valid() { return req_wait; } + req req_bits() { return req_buf; } + bool resp_ready() { return true; } + + uint32_t read(uint32_t addr); + uint32_t write(uint32_t addr, uint32_t data); + void nop(); + + uint64_t read_csr(unsigned which); + uint64_t write_csr(unsigned which, uint64_t data); + uint64_t clear_csr(unsigned which, uint64_t data); + uint64_t set_csr(unsigned which, uint64_t data); + void fence_i(); + + void producer_thread(); + + protected: + virtual void read_chunk(addr_t taddr, size_t len, void* dst) override; + virtual void write_chunk(addr_t taddr, size_t len, const void* src) override; + virtual void clear_chunk(addr_t taddr, size_t len) override; + virtual size_t chunk_align() override; + virtual size_t chunk_max_size() override; + virtual void reset() override; + virtual void idle() override; + + private: + context_t host; + context_t* target; + pthread_t producer; + sem_t req_produce; + sem_t req_consume; + sem_t resp_produce; + sem_t resp_consume; + req req_buf; + resp resp_buf; + bool running; + + uint32_t run_abstract_command(uint32_t command, const uint32_t program[], size_t program_n, + uint32_t data[], size_t data_n); + + void die(uint32_t cmderr); + void halt(int); + int enumerate_harts(); + void select_hart(int); + void resume(int); + uint64_t save_reg(unsigned regno); + void restore_reg(unsigned regno, uint64_t val); + + uint64_t modify_csr(unsigned which, uint64_t data, 
uint32_t type); + + bool req_wait; + bool resp_wait; + uint32_t data_base; + + uint32_t xlen; + + static const int max_idle_cycles = 10000; + + size_t ram_words; + size_t data_words; + int num_harts; + int current_hart; + + uint32_t get_xlen(); + uint32_t do_command(dtm_t::req r); + + void parse_args(const std::vector& args); + void register_devices(); + void start_host_thread(); + + friend class memif_t; +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/dummy.cc b/vendor/riscv-isa-sim/fesvr/dummy.cc new file mode 100644 index 00000000..a155d3e5 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/dummy.cc @@ -0,0 +1,4 @@ +// See LICENSE for license details. + +// help out poor, C-centric autoconf +extern "C" void libfesvr_is_present() {} diff --git a/vendor/riscv-isa-sim/fesvr/elf.h b/vendor/riscv-isa-sim/fesvr/elf.h new file mode 100644 index 00000000..7b38bf11 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elf.h @@ -0,0 +1,134 @@ +// See LICENSE for details. + +#ifndef _ELF_H +#define _ELF_H + +#include + +#define ET_EXEC 2 +#define EM_RISCV 243 +#define EM_NONE 0 +#define EV_CURRENT 1 + +#define IS_ELF(hdr) \ + ((hdr).e_ident[0] == 0x7f && (hdr).e_ident[1] == 'E' && \ + (hdr).e_ident[2] == 'L' && (hdr).e_ident[3] == 'F') + +#define ELF_SWAP(hdr, val) (IS_ELFLE(hdr)? from_le((val)) : from_be((val))) + +#define IS_ELF32(hdr) (IS_ELF(hdr) && (hdr).e_ident[4] == 1) +#define IS_ELF64(hdr) (IS_ELF(hdr) && (hdr).e_ident[4] == 2) +#define IS_ELFLE(hdr) (IS_ELF(hdr) && (hdr).e_ident[5] == 1) +#define IS_ELFBE(hdr) (IS_ELF(hdr) && (hdr).e_ident[5] == 2) +#define IS_ELF_EXEC(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_type) == ET_EXEC) +#define IS_ELF_RISCV(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_machine) == EM_RISCV) +#define IS_ELF_EM_NONE(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_machine) == EM_NONE) +#define IS_ELF_VCURRENT(hdr) (IS_ELF(hdr) && ELF_SWAP((hdr), (hdr).e_version) == EV_CURRENT) + +#define PT_LOAD 1 + +#define SHT_NOBITS 8 + +typedef struct { + uint8_t e_ident[16]; + uint16_t e_type; + uint16_t e_machine; + uint32_t e_version; + uint32_t e_entry; + uint32_t e_phoff; + uint32_t e_shoff; + uint32_t e_flags; + uint16_t e_ehsize; + uint16_t e_phentsize; + uint16_t e_phnum; + uint16_t e_shentsize; + uint16_t e_shnum; + uint16_t e_shstrndx; +} Elf32_Ehdr; + +typedef struct { + uint32_t sh_name; + uint32_t sh_type; + uint32_t sh_flags; + uint32_t sh_addr; + uint32_t sh_offset; + uint32_t sh_size; + uint32_t sh_link; + uint32_t sh_info; + uint32_t sh_addralign; + uint32_t sh_entsize; +} Elf32_Shdr; + +typedef struct +{ + uint32_t p_type; + uint32_t p_offset; + uint32_t p_vaddr; + uint32_t p_paddr; + uint32_t p_filesz; + uint32_t p_memsz; + uint32_t p_flags; + uint32_t p_align; +} Elf32_Phdr; + +typedef struct +{ + uint32_t st_name; + uint32_t st_value; + uint32_t st_size; + uint8_t st_info; + uint8_t st_other; + uint16_t st_shndx; +} Elf32_Sym; + +typedef struct { + uint8_t e_ident[16]; + uint16_t e_type; + uint16_t e_machine; + uint32_t e_version; + uint64_t e_entry; + uint64_t e_phoff; + uint64_t e_shoff; + uint32_t e_flags; + uint16_t e_ehsize; + uint16_t e_phentsize; + uint16_t e_phnum; + uint16_t e_shentsize; + uint16_t e_shnum; + uint16_t e_shstrndx; +} Elf64_Ehdr; + +typedef struct { + uint32_t sh_name; + uint32_t sh_type; + uint64_t sh_flags; + uint64_t sh_addr; + uint64_t sh_offset; + uint64_t sh_size; + uint32_t sh_link; + uint32_t sh_info; + uint64_t sh_addralign; + uint64_t sh_entsize; +} Elf64_Shdr; + +typedef struct { + uint32_t p_type; + uint32_t p_flags; + uint64_t 
p_offset; + uint64_t p_vaddr; + uint64_t p_paddr; + uint64_t p_filesz; + uint64_t p_memsz; + uint64_t p_align; +} Elf64_Phdr; + +typedef struct { + uint32_t st_name; + uint8_t st_info; + uint8_t st_other; + uint16_t st_shndx; + uint64_t st_value; + uint64_t st_size; +} Elf64_Sym; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/elf2hex.cc b/vendor/riscv-isa-sim/fesvr/elf2hex.cc new file mode 100644 index 00000000..327cf2d9 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elf2hex.cc @@ -0,0 +1,47 @@ +// See LICENSE for license details. + +#include +#include "htif_hexwriter.h" +#include "memif.h" +#include "elfloader.h" + +int main(int argc, char** argv) +{ + if(argc < 4 || argc > 5) + { + std::cerr << "Usage: " << argv[0] << " [base]" << std::endl; + return 1; + } + + unsigned width = atoi(argv[1]); + if(width == 0 || (width & (width-1))) + { + std::cerr << "width must be a power of 2" << std::endl; + return 1; + } + + unsigned long long int base = 0; + if(argc==5) { + base = atoll(argv[4]); + if(base & (width-1)) + { + std::cerr << "base must be divisible by width" << std::endl; + return 1; + } + } + + unsigned depth = atoi(argv[2]); + if(depth == 0 || (depth & (depth-1))) + { + std::cerr << "depth must be a power of 2" << std::endl; + return 1; + } + + htif_hexwriter_t htif(base, width, depth); + memif_t memif(&htif); + reg_t entry; + load_elf(argv[3], &memif, &entry); + std::cout << htif; + + return 0; +} diff --git a/vendor/riscv-isa-sim/fesvr/elfloader.cc b/vendor/riscv-isa-sim/fesvr/elfloader.cc new file mode 100644 index 00000000..76cd6da5 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elfloader.cc @@ -0,0 +1,117 @@ +// See LICENSE for license details. + +#include "elf.h" +#include "memif.h" +#include "byteorder.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +std::map load_elf(const char* fn, memif_t* memif, reg_t* entry) +{ + int fd = open(fn, O_RDONLY); + struct stat s; + assert(fd != -1); + if (fstat(fd, &s) < 0) + abort(); + size_t size = s.st_size; + + char* buf = (char*)mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0); + assert(buf != MAP_FAILED); + close(fd); + + assert(size >= sizeof(Elf64_Ehdr)); + const Elf64_Ehdr* eh64 = (const Elf64_Ehdr*)buf; + assert(IS_ELF32(*eh64) || IS_ELF64(*eh64)); + assert(IS_ELFLE(*eh64) || IS_ELFBE(*eh64)); + assert(IS_ELF_EXEC(*eh64)); + assert(IS_ELF_RISCV(*eh64) || IS_ELF_EM_NONE(*eh64)); + assert(IS_ELF_VCURRENT(*eh64)); + + std::vector zeros; + std::map symbols; + +#define LOAD_ELF(ehdr_t, phdr_t, shdr_t, sym_t, bswap) \ + do { \ + ehdr_t* eh = (ehdr_t*)buf; \ + phdr_t* ph = (phdr_t*)(buf + bswap(eh->e_phoff)); \ + *entry = bswap(eh->e_entry); \ + assert(size >= bswap(eh->e_phoff) + bswap(eh->e_phnum) * sizeof(*ph)); \ + for (unsigned i = 0; i < bswap(eh->e_phnum); i++) { \ + if (bswap(ph[i].p_type) == PT_LOAD && bswap(ph[i].p_memsz)) { \ + if (bswap(ph[i].p_filesz)) { \ + assert(size >= bswap(ph[i].p_offset) + bswap(ph[i].p_filesz)); \ + memif->write(bswap(ph[i].p_paddr), bswap(ph[i].p_filesz), \ + (uint8_t*)buf + bswap(ph[i].p_offset)); \ + } \ + if (size_t pad = bswap(ph[i].p_memsz) - bswap(ph[i].p_filesz)) { \ + zeros.resize(pad); \ + memif->write(bswap(ph[i].p_paddr) + bswap(ph[i].p_filesz), pad, \ + zeros.data()); \ + } \ + } \ + } \ + shdr_t* sh = (shdr_t*)(buf + bswap(eh->e_shoff)); \ + assert(size >= bswap(eh->e_shoff) + bswap(eh->e_shnum) * sizeof(*sh)); \ + assert(bswap(eh->e_shstrndx) < bswap(eh->e_shnum)); \ + assert(size >= 
bswap(sh[bswap(eh->e_shstrndx)].sh_offset) + \ + bswap(sh[bswap(eh->e_shstrndx)].sh_size)); \ + char* shstrtab = buf + bswap(sh[bswap(eh->e_shstrndx)].sh_offset); \ + unsigned strtabidx = 0, symtabidx = 0; \ + for (unsigned i = 0; i < bswap(eh->e_shnum); i++) { \ + unsigned max_len = \ + bswap(sh[bswap(eh->e_shstrndx)].sh_size) - bswap(sh[i].sh_name); \ + assert(bswap(sh[i].sh_name) < bswap(sh[bswap(eh->e_shstrndx)].sh_size)); \ + assert(strnlen(shstrtab + bswap(sh[i].sh_name), max_len) < max_len); \ + if (bswap(sh[i].sh_type) & SHT_NOBITS) continue; \ + assert(size >= bswap(sh[i].sh_offset) + bswap(sh[i].sh_size)); \ + if (strcmp(shstrtab + bswap(sh[i].sh_name), ".strtab") == 0) \ + strtabidx = i; \ + if (strcmp(shstrtab + bswap(sh[i].sh_name), ".symtab") == 0) \ + symtabidx = i; \ + } \ + if (strtabidx && symtabidx) { \ + char* strtab = buf + bswap(sh[strtabidx].sh_offset); \ + sym_t* sym = (sym_t*)(buf + bswap(sh[symtabidx].sh_offset)); \ + for (unsigned i = 0; i < bswap(sh[symtabidx].sh_size) / sizeof(sym_t); \ + i++) { \ + unsigned max_len = \ + bswap(sh[strtabidx].sh_size) - bswap(sym[i].st_name); \ + assert(bswap(sym[i].st_name) < bswap(sh[strtabidx].sh_size)); \ + assert(strnlen(strtab + bswap(sym[i].st_name), max_len) < max_len); \ + symbols[strtab + bswap(sym[i].st_name)] = bswap(sym[i].st_value); \ + } \ + } \ + } while (0) + + if (IS_ELFLE(*eh64)) { + memif->set_target_endianness(memif_endianness_little); + if (IS_ELF32(*eh64)) + LOAD_ELF(Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Sym, from_le); + else + LOAD_ELF(Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Sym, from_le); + } else { +#ifndef RISCV_ENABLE_DUAL_ENDIAN + throw std::invalid_argument("Specified ELF is big endian. Configure with --enable-dual-endian to enable support"); +#else + memif->set_target_endianness(memif_endianness_big); + if (IS_ELF32(*eh64)) + LOAD_ELF(Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr, Elf32_Sym, from_be); + else + LOAD_ELF(Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr, Elf64_Sym, from_be); +#endif + } + + munmap(buf, size); + + return symbols; +} diff --git a/vendor/riscv-isa-sim/fesvr/elfloader.h b/vendor/riscv-isa-sim/fesvr/elfloader.h new file mode 100644 index 00000000..696ef478 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/elfloader.h @@ -0,0 +1,13 @@ +// See LICENSE for license details. 
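+//
+// Usage sketch (mirrors elf2hex.cc above; the map's template arguments are an
+// assumption, since they are not spelled out in this excerpt):
+//
+//   memif_t memif(&backend);   // 'backend' is any chunked_memif_t implementation
+//   reg_t entry;
+//   std::map<std::string, uint64_t> symbols = load_elf(path, &memif, &entry);
+//
+// load_elf() copies the ELF's PT_LOAD segments into target memory through memif,
+// stores the entry point in *entry, and returns the .symtab entries keyed by name.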
+ +#ifndef _ELFLOADER_H +#define _ELFLOADER_H + +#include "elf.h" +#include +#include + +class memif_t; +std::map load_elf(const char* fn, memif_t* memif, reg_t* entry); + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/fesvr.ac b/vendor/riscv-isa-sim/fesvr/fesvr.ac new file mode 100644 index 00000000..f741baea --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/fesvr.ac @@ -0,0 +1,11 @@ +AC_CHECK_LIB(pthread, pthread_create, [], [AC_MSG_ERROR([libpthread is required])]) + +AC_CHECK_MEMBER(struct statx.stx_ino, + AC_DEFINE_UNQUOTED(HAVE_STATX, 1, [Define to 1 if struct statx exists.]), + , +) + +AC_CHECK_MEMBER(struct statx.stx_mnt_id, + AC_DEFINE_UNQUOTED(HAVE_STATX_MNT_ID, 1, [Define to 1 if struct statx has stx_mnt_id.]), + , +) diff --git a/vendor/riscv-isa-sim/fesvr/fesvr.mk.in b/vendor/riscv-isa-sim/fesvr/fesvr.mk.in new file mode 100644 index 00000000..695de527 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/fesvr.mk.in @@ -0,0 +1,41 @@ +fesvr_hdrs = \ + byteorder.h \ + elf.h \ + elfloader.h \ + htif.h \ + dtm.h \ + memif.h \ + syscall.h \ + context.h \ + htif_pthread.h \ + htif_hexwriter.h \ + option_parser.h \ + term.h \ + device.h \ + rfb.h \ + tsi.h \ + +fesvr_install_hdrs = $(fesvr_hdrs) + +fesvr_install_config_hdr = yes + +fesvr_install_lib = yes + +fesvr_srcs = \ + elfloader.cc \ + htif.cc \ + memif.cc \ + dtm.cc \ + syscall.cc \ + device.cc \ + rfb.cc \ + context.cc \ + htif_pthread.cc \ + htif_hexwriter.cc \ + dummy.cc \ + option_parser.cc \ + term.cc \ + tsi.cc \ + +fesvr_install_prog_srcs = \ + elf2hex.cc \ diff --git a/vendor/riscv-isa-sim/fesvr/fesvr.pc.in b/vendor/riscv-isa-sim/fesvr/fesvr.pc.in new file mode 100644 index 00000000..f2d12563 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/fesvr.pc.in @@ -0,0 +1,26 @@ +#========================================================================= +# Modular C++ Build System Subproject Package Config +#========================================================================= +# Please read the documenation in 'mcppbs-uguide.txt' for more details +# on how the Modular C++ Build System works. + +#------------------------------------------------------------------------- +# Generic variables +#------------------------------------------------------------------------- + +prefix=@prefix@ +include_dir=${prefix}/include/fesvr +lib_dir=${prefix}/lib + +#------------------------------------------------------------------------- +# Keywords +#------------------------------------------------------------------------- + +Name : fesvr +Version : @PACKAGE_VERSION@ +Description : Frontend Server C/C++ API +Requires : @fesvr_pkcdeps@ +Cflags : -I${include_dir} @CPPFLAGS@ @fesvr_extra_cppflags@ +Libs : -L${lib_dir} @LDFLAGS@ @fesvr_extra_ldflags@ \ + -lfesvr @fesvr_extra_libs@ + diff --git a/vendor/riscv-isa-sim/fesvr/htif.cc b/vendor/riscv-isa-sim/fesvr/htif.cc new file mode 100644 index 00000000..ead309c8 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif.cc @@ -0,0 +1,415 @@ +// See LICENSE for license details. + +#include "htif.h" +#include "rfb.h" +#include "elfloader.h" +#include "platform.h" +#include "byteorder.h" +#include "trap.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Attempt to determine the execution prefix automatically. autoconf + * sets PREFIX, and pconfigure sets __PCONFIGURE__PREFIX. 
*/ +#if !defined(PREFIX) && defined(__PCONFIGURE__PREFIX) +# define PREFIX __PCONFIGURE__PREFIX +#endif + +#ifndef TARGET_ARCH +# define TARGET_ARCH "riscv64-unknown-elf" +#endif + +#ifndef TARGET_DIR +# define TARGET_DIR "/" TARGET_ARCH "/bin/" +#endif + +static volatile bool signal_exit = false; +static void handle_signal(int sig) +{ + if (sig == SIGABRT || signal_exit) // someone set up us the bomb! + exit(-1); + signal_exit = true; + signal(sig, &handle_signal); +} + +htif_t::htif_t() + : mem(this), entry(DRAM_BASE), sig_addr(0), sig_len(0), + tohost_addr(0), fromhost_addr(0), exitcode(0), stopped(false), + syscall_proxy(this) +{ + signal(SIGINT, &handle_signal); + signal(SIGTERM, &handle_signal); + signal(SIGABRT, &handle_signal); // we still want to call static destructors +} + +htif_t::htif_t(int argc, char** argv) : htif_t() +{ + //Set line size as 16 by default. + line_size = 16; + parse_arguments(argc, argv); + register_devices(); +} + +htif_t::htif_t(const std::vector& args) : htif_t() +{ + int argc = args.size() + 1; + char * argv[argc]; + argv[0] = (char *) "htif"; + for (unsigned int i = 0; i < args.size(); i++) { + argv[i+1] = (char *) args[i].c_str(); + } + //Set line size as 16 by default. + line_size = 16; + parse_arguments(argc, argv); + register_devices(); +} + +htif_t::~htif_t() +{ + for (auto d : dynamic_devices) + delete d; +} + +void htif_t::start() +{ + if (!targs.empty() && targs[0] != "none") + load_program(); + + reset(); +} + +static void bad_address(const std::string& situation, reg_t addr) +{ + std::cerr << "Access exception occurred while " << situation << ":\n"; + std::cerr << "Memory address 0x" << std::hex << addr << " is invalid\n"; + exit(-1); +} + +std::map htif_t::load_payload(const std::string& payload, reg_t* entry) +{ + std::string path; + if (access(payload.c_str(), F_OK) == 0) + path = payload; + else if (payload.find('/') == std::string::npos) + { + std::string test_path = PREFIX TARGET_DIR + payload; + if (access(test_path.c_str(), F_OK) == 0) + path = test_path; + } + + if (path.empty()) + throw std::runtime_error( + "could not open " + payload + + " (did you misspell it? 
If VCS, did you forget +permissive/+permissive-off?)"); + + // temporarily construct a memory interface that skips writing bytes + // that have already been preloaded through a sideband + class preload_aware_memif_t : public memif_t { + public: + preload_aware_memif_t(htif_t* htif) : memif_t(htif), htif(htif) {} + + void write(addr_t taddr, size_t len, const void* src) override + { + if (!htif->is_address_preloaded(taddr, len)) + memif_t::write(taddr, len, src); + } + + private: + htif_t* htif; + } preload_aware_memif(this); + + try { + return load_elf(path.c_str(), &preload_aware_memif, entry); + } catch (mem_trap_t& t) { + bad_address("loading payload " + payload, t.get_tval()); + abort(); + } +} + +void htif_t::load_program() +{ + std::map symbols = load_payload(targs[0], &entry); + + if (symbols.count("tohost") && symbols.count("fromhost")) { + tohost_addr = symbols["tohost"]; + fromhost_addr = symbols["fromhost"]; + } else { + fprintf(stderr, "warning: tohost and fromhost symbols not in ELF; can't communicate with target\n"); + } + + // detect torture tests so we can print the memory signature at the end + if (symbols.count("begin_signature") && symbols.count("end_signature")) + { + sig_addr = symbols["begin_signature"]; + sig_len = symbols["end_signature"] - sig_addr; + } + + for (auto payload : payloads) + { + reg_t dummy_entry; + load_payload(payload, &dummy_entry); + } + + for (auto i : symbols) + { + auto it = addr2symbol.find(i.second); + if ( it == addr2symbol.end()) + addr2symbol[i.second] = i.first; + } + + return; +} + +const char* htif_t::get_symbol(uint64_t addr) +{ + auto it = addr2symbol.find(addr); + + if(it == addr2symbol.end()) + return nullptr; + + return it->second.c_str(); +} + +void htif_t::stop() +{ + if (!sig_file.empty() && sig_len) // print final torture test signature + { + std::vector buf(sig_len); + mem.read(sig_addr, sig_len, buf.data()); + + std::ofstream sigs(sig_file); + assert(sigs && "can't open signature file!"); + sigs << std::setfill('0') << std::hex; + + for (addr_t i = 0; i < sig_len; i += line_size) + { + for (addr_t j = line_size; j > 0; j--) + if (i+j <= sig_len) + sigs << std::setw(2) << (uint16_t)buf[i+j-1]; + else + sigs << std::setw(2) << (uint16_t)0; + sigs << '\n'; + } + + sigs.close(); + } + + stopped = true; +} + +void htif_t::clear_chunk(addr_t taddr, size_t len) +{ + char zeros[chunk_max_size()]; + memset(zeros, 0, chunk_max_size()); + + for (size_t pos = 0; pos < len; pos += chunk_max_size()) + write_chunk(taddr + pos, std::min(len - pos, chunk_max_size()), zeros); +} + +int htif_t::run() +{ + start(); + + auto enq_func = [](std::queue* q, uint64_t x) { q->push(x); }; + std::queue fromhost_queue; + std::function fromhost_callback = + std::bind(enq_func, &fromhost_queue, std::placeholders::_1); + + if (tohost_addr == 0) { + while (true) + idle(); + } + + while (!signal_exit && exitcode == 0) + { + uint64_t tohost; + + try { + if ((tohost = from_target(mem.read_uint64(tohost_addr))) != 0) + mem.write_uint64(tohost_addr, target_endian::zero); + } catch (mem_trap_t& t) { + bad_address("accessing tohost", t.get_tval()); + } + + try { + if (tohost != 0) { + command_t cmd(mem, tohost, fromhost_callback); + device_list.handle_command(cmd); + } else { + idle(); + } + + device_list.tick(); + } catch (mem_trap_t& t) { + std::stringstream tohost_hex; + tohost_hex << std::hex << tohost; + bad_address("host was accessing memory on behalf of target (tohost = 0x" + tohost_hex.str() + ")", t.get_tval()); + } + + try { + if (!fromhost_queue.empty() 
&& !mem.read_uint64(fromhost_addr)) { + mem.write_uint64(fromhost_addr, to_target(fromhost_queue.front())); + fromhost_queue.pop(); + } + } catch (mem_trap_t& t) { + bad_address("accessing fromhost", t.get_tval()); + } + } + + stop(); + + return exit_code(); +} + +bool htif_t::done() +{ + return stopped; +} + +int htif_t::exit_code() +{ + return exitcode >> 1; +} + +void htif_t::parse_arguments(int argc, char ** argv) +{ + optind = 0; // reset optind as HTIF may run getopt _after_ others + while (1) { + static struct option long_options[] = { HTIF_LONG_OPTIONS }; + int option_index = 0; + int c = getopt_long(argc, argv, "-h", long_options, &option_index); + + if (c == -1) break; + retry: + switch (c) { + case 'h': usage(argv[0]); + throw std::invalid_argument("User queried htif_t help text"); + case HTIF_LONG_OPTIONS_OPTIND: + if (optarg) dynamic_devices.push_back(new rfb_t(atoi(optarg))); + else dynamic_devices.push_back(new rfb_t); + break; + case HTIF_LONG_OPTIONS_OPTIND + 1: + // [TODO] Remove once disks are supported again + throw std::invalid_argument("--disk/+disk unsupported (use a ramdisk)"); + dynamic_devices.push_back(new disk_t(optarg)); + break; + case HTIF_LONG_OPTIONS_OPTIND + 2: + sig_file = optarg; + break; + case HTIF_LONG_OPTIONS_OPTIND + 3: + syscall_proxy.set_chroot(optarg); + break; + case HTIF_LONG_OPTIONS_OPTIND + 4: + payloads.push_back(optarg); + break; + case HTIF_LONG_OPTIONS_OPTIND + 5: + line_size = atoi(optarg); + + break; + case '?': + if (!opterr) + break; + throw std::invalid_argument("Unknown argument (did you mean to enable +permissive parsing?)"); + case 1: { + std::string arg = optarg; + if (arg == "+h" || arg == "+help") { + c = 'h'; + optarg = nullptr; + } + else if (arg == "+rfb") { + c = HTIF_LONG_OPTIONS_OPTIND; + optarg = nullptr; + } + else if (arg.find("+rfb=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND; + optarg = optarg + 5; + } + else if (arg.find("+disk=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 1; + optarg = optarg + 6; + } + else if (arg.find("+signature=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 2; + optarg = optarg + 11; + } + else if (arg.find("+chroot=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 3; + optarg = optarg + 8; + } + else if (arg.find("+payload=") == 0) { + c = HTIF_LONG_OPTIONS_OPTIND + 4; + optarg = optarg + 9; + } + else if(arg.find("+signature-granularity=")==0){ + c = HTIF_LONG_OPTIONS_OPTIND + 5; + optarg = optarg + 23; + } + else if (arg.find("+permissive-off") == 0) { + if (opterr) + throw std::invalid_argument("Found +permissive-off when not parsing permissively"); + opterr = 1; + break; + } + else if (arg.find("+permissive") == 0) { + if (!opterr) + throw std::invalid_argument("Found +permissive when already parsing permissively"); + opterr = 0; + break; + } + else { + if (!opterr) + break; + else { + optind--; + goto done_processing; + } + } + goto retry; + } + } + } + +done_processing: + while (optind < argc) + targs.push_back(argv[optind++]); + if (!targs.size()) { + usage(argv[0]); + throw std::invalid_argument("No binary specified (Did you forget it? Did you forget '+permissive-off' if running with +permissive?)"); + } +} + +void htif_t::register_devices() +{ + device_list.register_device(&syscall_proxy); + device_list.register_device(&bcd); + for (auto d : dynamic_devices) + device_list.register_device(d); +} + +void htif_t::usage(const char * program_name) +{ + printf("Usage: %s [EMULATOR OPTION]... [VERILOG PLUSARG]... [HOST OPTION]... 
BINARY [TARGET OPTION]...\n ", + program_name); + fputs("\ +Run a BINARY on the Rocket Chip emulator.\n\ +\n\ +Mandatory arguments to long options are mandatory for short options too.\n\ +\n\ +EMULATOR OPTIONS\n\ + Consult emulator.cc if using Verilator or VCS documentation if using VCS\n\ + for available options.\n\ +EMUALTOR VERILOG PLUSARGS\n\ + Consult generated-src*/*.plusArgs for available options\n\ +", stdout); + fputs("\n" HTIF_USAGE_OPTIONS, stdout); +} diff --git a/vendor/riscv-isa-sim/fesvr/htif.h b/vendor/riscv-isa-sim/fesvr/htif.h new file mode 100644 index 00000000..3cee25f7 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif.h @@ -0,0 +1,156 @@ +// See LICENSE for license details. + +#ifndef __HTIF_H +#define __HTIF_H + +#include "memif.h" +#include "syscall.h" +#include "device.h" +#include "byteorder.h" +#include +#include +#include +#include + +class htif_t : public chunked_memif_t +{ + public: + htif_t(); + htif_t(int argc, char** argv); + htif_t(const std::vector& args); + virtual ~htif_t(); + + virtual void start(); + virtual void stop(); + + int run(); + bool done(); + int exit_code(); + + virtual memif_t& memif() { return mem; } + + template inline T from_target(target_endian n) const + { +#ifdef RISCV_ENABLE_DUAL_ENDIAN + memif_endianness_t endianness = get_target_endianness(); + assert(endianness == memif_endianness_little || endianness == memif_endianness_big); + + return endianness == memif_endianness_big? n.from_be() : n.from_le(); +#else + return n.from_le(); +#endif + } + + template inline target_endian to_target(T n) const + { +#ifdef RISCV_ENABLE_DUAL_ENDIAN + memif_endianness_t endianness = get_target_endianness(); + assert(endianness == memif_endianness_little || endianness == memif_endianness_big); + + return endianness == memif_endianness_big? 
target_endian::to_be(n) : target_endian::to_le(n); +#else + return target_endian::to_le(n); +#endif + } + + protected: + virtual void reset() = 0; + + virtual void read_chunk(addr_t taddr, size_t len, void* dst) = 0; + virtual void write_chunk(addr_t taddr, size_t len, const void* src) = 0; + virtual void clear_chunk(addr_t taddr, size_t len); + + virtual size_t chunk_align() = 0; + virtual size_t chunk_max_size() = 0; + + virtual std::map load_payload(const std::string& payload, reg_t* entry); + virtual void load_program(); + virtual void idle() {} + + const std::vector& host_args() { return hargs; } + + reg_t get_entry_point() { return entry; } + + // indicates that the initial program load can skip writing this address + // range to memory, because it has already been loaded through a sideband + virtual bool is_address_preloaded(addr_t taddr, size_t len) { return false; } + + // Given an address, return symbol from addr2symbol map + const char* get_symbol(uint64_t addr); + + private: + void parse_arguments(int argc, char ** argv); + void register_devices(); + void usage(const char * program_name); + + memif_t mem; + reg_t entry; + bool writezeros; + std::vector hargs; + std::vector targs; + std::string sig_file; + unsigned int line_size; + addr_t sig_addr; // torture + addr_t sig_len; // torture + addr_t tohost_addr; + addr_t fromhost_addr; + int exitcode; + bool stopped; + + device_list_t device_list; + syscall_t syscall_proxy; + bcd_t bcd; + std::vector dynamic_devices; + std::vector payloads; + + const std::vector& target_args() { return targs; } + + std::map addr2symbol; + + friend class memif_t; + friend class syscall_t; +}; + +/* Alignment guide for emulator.cc options: + -x, --long-option Description with max 80 characters --------------->\n\ + +plus-arg-equivalent\n\ + */ +#define HTIF_USAGE_OPTIONS \ +"HOST OPTIONS\n\ + -h, --help Display this help and exit\n\ + +h, +help\n\ + +permissive The host will ignore any unparsed options up until\n\ + +permissive-off (Only needed for VCS)\n\ + +permissive-off Stop ignoring options. This is mandatory if using\n\ + +permissive (Only needed for VCS)\n\ + --rfb=DISPLAY Add new remote frame buffer on display DISPLAY\n\ + +rfb=DISPLAY to be accessible on 5900 + DISPLAY (default = 0)\n\ + --signature=FILE Write torture test signature to FILE\n\ + +signature=FILE\n\ + --signature-granularity=VAL Size of each line in signature.\n\ + +signature-granularity=VAL\n\ + --chroot=PATH Use PATH as location of syscall-servicing binaries\n\ + +chroot=PATH\n\ + --payload=PATH Load PATH memory as an additional ELF payload\n\ + +payload=PATH\n\ +\n\ +HOST OPTIONS (currently unsupported)\n\ + --disk=DISK Add DISK device. 
Use a ramdisk since this isn't\n\ + +disk=DISK supported\n\ +\n\ +TARGET (RISC-V BINARY) OPTIONS\n\ + These are the options passed to the program executing on the emulated RISC-V\n\ + microprocessor.\n" + +#define HTIF_LONG_OPTIONS_OPTIND 1024 +#define HTIF_LONG_OPTIONS \ +{"help", no_argument, 0, 'h' }, \ +{"rfb", optional_argument, 0, HTIF_LONG_OPTIONS_OPTIND }, \ +{"disk", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 1 }, \ +{"signature", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 2 }, \ +{"chroot", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 3 }, \ +{"payload", required_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 4 }, \ +{"signature-granularity", optional_argument, 0, HTIF_LONG_OPTIONS_OPTIND + 5 }, \ +{0, 0, 0, 0} + +#endif // __HTIF_H diff --git a/vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc new file mode 100644 index 00000000..e4811b3b --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.cc @@ -0,0 +1,76 @@ +// See LICENSE for license details. + +#include +#include +#include "htif_hexwriter.h" + +htif_hexwriter_t::htif_hexwriter_t(size_t b, size_t w, size_t d) + : base(b), width(w), depth(d) +{ +} + +void htif_hexwriter_t::read_chunk(addr_t taddr, size_t len, void* vdst) +{ + taddr -= base; + + assert(len % chunk_align() == 0); + assert(taddr < width*depth); + assert(taddr+len <= width*depth); + + uint8_t* dst = (uint8_t*)vdst; + while(len) + { + if(mem[taddr/width].size() == 0) + mem[taddr/width].resize(width,0); + + for(size_t j = 0; j < width; j++) + dst[j] = mem[taddr/width][j]; + + len -= width; + taddr += width; + dst += width; + } +} + +void htif_hexwriter_t::write_chunk(addr_t taddr, size_t len, const void* vsrc) +{ + taddr -= base; + + assert(len % chunk_align() == 0); + assert(taddr < width*depth); + assert(taddr+len <= width*depth); + + const uint8_t* src = (const uint8_t*)vsrc; + while(len) + { + if(mem[taddr/width].size() == 0) + mem[taddr/width].resize(width,0); + + for(size_t j = 0; j < width; j++) + mem[taddr/width][j] = src[j]; + + len -= width; + taddr += width; + } +} + +std::ostream& operator<< (std::ostream& o, const htif_hexwriter_t& h) +{ + std::ios_base::fmtflags flags = o.setf(std::ios::hex,std::ios::basefield); + + for(size_t addr = 0; addr < h.depth; addr++) + { + std::map >::const_iterator i = h.mem.find(addr); + if(i == h.mem.end()) + for(size_t j = 0; j < h.width; j++) + o << "00"; + else + for(size_t j = 0; j < h.width; j++) + o << ((i->second[h.width-j-1] >> 4) & 0xF) << (i->second[h.width-j-1] & 0xF); + o << std::endl; + } + + o.setf(flags); + + return o; +} diff --git a/vendor/riscv-isa-sim/fesvr/htif_hexwriter.h b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.h new file mode 100644 index 00000000..72561662 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_hexwriter.h @@ -0,0 +1,32 @@ +// See LICENSE for license details. 
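+//
+// htif_hexwriter_t is an in-memory chunked_memif_t backend used by elf2hex.cc:
+// load_elf() writes the ELF image into it through a memif_t, and the operator<<
+// defined in htif_hexwriter.cc then dumps the contents as one width-byte hex word
+// per line, depth lines in total, padding untouched words with zeroes.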
+ +#ifndef __HTIF_HEXWRITER_H +#define __HTIF_HEXWRITER_H + +#include +#include +#include +#include "memif.h" + +class htif_hexwriter_t : public chunked_memif_t +{ +public: + htif_hexwriter_t(size_t b, size_t w, size_t d); + +protected: + size_t base; + size_t width; + size_t depth; + std::map > mem; + + void read_chunk(addr_t taddr, size_t len, void* dst); + void write_chunk(addr_t taddr, size_t len, const void* src); + void clear_chunk(addr_t taddr, size_t len) {} + + size_t chunk_max_size() { return width; } + size_t chunk_align() { return width; } + + friend std::ostream& operator<< (std::ostream&, const htif_hexwriter_t&); +}; + +#endif // __HTIF_HEXWRITER_H diff --git a/vendor/riscv-isa-sim/fesvr/htif_pthread.cc b/vendor/riscv-isa-sim/fesvr/htif_pthread.cc new file mode 100644 index 00000000..b9e3832b --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_pthread.cc @@ -0,0 +1,66 @@ +// See LICENSE for license details. + +#include "htif_pthread.h" +#include +#include + +void htif_pthread_t::thread_main(void* arg) +{ + htif_pthread_t* htif = static_cast(arg); + htif->run(); + while (true) + htif->target->switch_to(); +} + +htif_pthread_t::htif_pthread_t(int argc, char** argv) + : htif_t(argc, argv) +{ + target = context_t::current(); + host.init(thread_main, this); +} + +htif_pthread_t::~htif_pthread_t() +{ +} + +ssize_t htif_pthread_t::read(void* buf, size_t max_size) +{ + while (th_data.size() == 0) + target->switch_to(); + + size_t s = std::min(max_size, th_data.size()); + std::copy(th_data.begin(), th_data.begin() + s, (char*)buf); + th_data.erase(th_data.begin(), th_data.begin() + s); + + return s; +} + +ssize_t htif_pthread_t::write(const void* buf, size_t size) +{ + ht_data.insert(ht_data.end(), (const char*)buf, (const char*)buf + size); + return size; +} + +void htif_pthread_t::send(const void* buf, size_t size) +{ + th_data.insert(th_data.end(), (const char*)buf, (const char*)buf + size); +} + +void htif_pthread_t::recv(void* buf, size_t size) +{ + while (!this->recv_nonblocking(buf, size)) + ; +} + +bool htif_pthread_t::recv_nonblocking(void* buf, size_t size) +{ + if (ht_data.size() < size) + { + host.switch_to(); + return false; + } + + std::copy(ht_data.begin(), ht_data.begin() + size, (char*)buf); + ht_data.erase(ht_data.begin(), ht_data.begin() + size); + return true; +} diff --git a/vendor/riscv-isa-sim/fesvr/htif_pthread.h b/vendor/riscv-isa-sim/fesvr/htif_pthread.h new file mode 100644 index 00000000..c00c3823 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/htif_pthread.h @@ -0,0 +1,38 @@ +// See LICENSE for license details. 
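+//
+// htif_pthread_t runs the HTIF host loop on a separate execution context: the
+// constructor records the caller as 'target', thread_main() drives htif_t::run()
+// on the 'host' context, and send()/recv() shuttle bytes between the two through
+// the th_data/ht_data queues, switching contexts whenever one side would
+// otherwise block.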
+ +#ifndef _HTIF_PTHREAD_H +#define _HTIF_PTHREAD_H + +#include "htif.h" +#include "context.h" +#include + +class htif_pthread_t : public htif_t +{ + public: + htif_pthread_t(int argc, char** argv); + virtual ~htif_pthread_t(); + + // target inteface + void send(const void* buf, size_t size); + void recv(void* buf, size_t size); + bool recv_nonblocking(void* buf, size_t size); + + protected: + // host interface + virtual ssize_t read(void* buf, size_t max_size); + virtual ssize_t write(const void* buf, size_t size); + + virtual size_t chunk_align() { return 64; } + virtual size_t chunk_max_size() { return 1024; } + + private: + context_t host; + context_t* target; + std::deque th_data; + std::deque ht_data; + + static void thread_main(void* htif); +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/memif.cc b/vendor/riscv-isa-sim/fesvr/memif.cc new file mode 100644 index 00000000..e56bd943 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/memif.cc @@ -0,0 +1,183 @@ +// See LICENSE for license details. + +#include +#include +#include +#include +#include "memif.h" + +void memif_t::read(addr_t addr, size_t len, void* bytes) +{ + size_t align = cmemif->chunk_align(); + if (len && (addr & (align-1))) + { + size_t this_len = std::min(len, align - size_t(addr & (align-1))); + uint8_t chunk[align]; + + cmemif->read_chunk(addr & ~(align-1), align, chunk); + memcpy(bytes, chunk + (addr & (align-1)), this_len); + + bytes = (char*)bytes + this_len; + addr += this_len; + len -= this_len; + } + + if (len & (align-1)) + { + size_t this_len = len & (align-1); + size_t start = len - this_len; + uint8_t chunk[align]; + + cmemif->read_chunk(addr + start, align, chunk); + memcpy((char*)bytes + start, chunk, this_len); + + len -= this_len; + } + + // now we're aligned + for (size_t pos = 0; pos < len; pos += cmemif->chunk_max_size()) + cmemif->read_chunk(addr + pos, std::min(cmemif->chunk_max_size(), len - pos), (char*)bytes + pos); +} + +void memif_t::write(addr_t addr, size_t len, const void* bytes) +{ + size_t align = cmemif->chunk_align(); + if (len && (addr & (align-1))) + { + size_t this_len = std::min(len, align - size_t(addr & (align-1))); + uint8_t chunk[align]; + + cmemif->read_chunk(addr & ~(align-1), align, chunk); + memcpy(chunk + (addr & (align-1)), bytes, this_len); + cmemif->write_chunk(addr & ~(align-1), align, chunk); + + bytes = (char*)bytes + this_len; + addr += this_len; + len -= this_len; + } + + if (len & (align-1)) + { + size_t this_len = len & (align-1); + size_t start = len - this_len; + uint8_t chunk[align]; + + cmemif->read_chunk(addr + start, align, chunk); + memcpy(chunk, (char*)bytes + start, this_len); + cmemif->write_chunk(addr + start, align, chunk); + + len -= this_len; + } + + // now we're aligned + bool all_zero = len != 0; + for (size_t i = 0; i < len; i++) + all_zero &= ((const char*)bytes)[i] == 0; + + if (all_zero) { + cmemif->clear_chunk(addr, len); + } else { + size_t max_chunk = cmemif->chunk_max_size(); + for (size_t pos = 0; pos < len; pos += max_chunk) + cmemif->write_chunk(addr + pos, std::min(max_chunk, len - pos), (char*)bytes + pos); + } +} + +#define MEMIF_READ_FUNC \ + if(addr & (sizeof(val)-1)) \ + throw std::runtime_error("misaligned address"); \ + this->read(addr, sizeof(val), &val); \ + return val + +#define MEMIF_WRITE_FUNC \ + if(addr & (sizeof(val)-1)) \ + throw std::runtime_error("misaligned address"); \ + this->write(addr, sizeof(val), &val) + +target_endian memif_t::read_uint8(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + 
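+// Like read_uint8 above, the remaining typed accessors expand MEMIF_READ_FUNC /
+// MEMIF_WRITE_FUNC: they reject addresses that are not naturally aligned to the
+// access width and forward to the byte-array read()/write() above, which handle
+// the backend's chunk alignment and maximum chunk size themselves.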
+target_endian memif_t::read_int8(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint8(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int8(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +target_endian memif_t::read_uint16(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +target_endian memif_t::read_int16(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint16(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int16(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +target_endian memif_t::read_uint32(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +target_endian memif_t::read_int32(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint32(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int32(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +target_endian memif_t::read_uint64(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +target_endian memif_t::read_int64(addr_t addr) +{ + target_endian val; + MEMIF_READ_FUNC; +} + +void memif_t::write_uint64(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} + +void memif_t::write_int64(addr_t addr, target_endian val) +{ + MEMIF_WRITE_FUNC; +} diff --git a/vendor/riscv-isa-sim/fesvr/memif.h b/vendor/riscv-isa-sim/fesvr/memif.h new file mode 100644 index 00000000..001c4254 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/memif.h @@ -0,0 +1,82 @@ +// See LICENSE for license details. + +#ifndef __MEMIF_H +#define __MEMIF_H + +#include +#include +#include "byteorder.h" + +typedef uint64_t reg_t; +typedef int64_t sreg_t; +typedef reg_t addr_t; + +typedef enum { + memif_endianness_undecided, + memif_endianness_little, + memif_endianness_big +} memif_endianness_t; + +class chunked_memif_t +{ +public: + virtual void read_chunk(addr_t taddr, size_t len, void* dst) = 0; + virtual void write_chunk(addr_t taddr, size_t len, const void* src) = 0; + virtual void clear_chunk(addr_t taddr, size_t len) = 0; + + virtual size_t chunk_align() = 0; + virtual size_t chunk_max_size() = 0; + + virtual void set_target_endianness(memif_endianness_t endianness) {} + virtual memif_endianness_t get_target_endianness() const { + return memif_endianness_undecided; + } +}; + +class memif_t +{ +public: + memif_t(chunked_memif_t* _cmemif) : cmemif(_cmemif) {} + virtual ~memif_t(){} + + // read and write byte arrays + virtual void read(addr_t addr, size_t len, void* bytes); + virtual void write(addr_t addr, size_t len, const void* bytes); + + // read and write 8-bit words + virtual target_endian read_uint8(addr_t addr); + virtual target_endian read_int8(addr_t addr); + virtual void write_uint8(addr_t addr, target_endian val); + virtual void write_int8(addr_t addr, target_endian val); + + // read and write 16-bit words + virtual target_endian read_uint16(addr_t addr); + virtual target_endian read_int16(addr_t addr); + virtual void write_uint16(addr_t addr, target_endian val); + virtual void write_int16(addr_t addr, target_endian val); + + // read and write 32-bit words + virtual target_endian read_uint32(addr_t addr); + virtual target_endian read_int32(addr_t addr); + virtual void write_uint32(addr_t addr, target_endian val); + virtual void write_int32(addr_t addr, target_endian val); + + // read and write 64-bit words + virtual target_endian read_uint64(addr_t addr); + virtual target_endian read_int64(addr_t addr); + 
virtual void write_uint64(addr_t addr, target_endian val); + virtual void write_int64(addr_t addr, target_endian val); + + // endianness + virtual void set_target_endianness(memif_endianness_t endianness) { + cmemif->set_target_endianness(endianness); + } + virtual memif_endianness_t get_target_endianness() const { + return cmemif->get_target_endianness(); + } + +protected: + chunked_memif_t* cmemif; +}; + +#endif // __MEMIF_H diff --git a/vendor/riscv-isa-sim/fesvr/option_parser.cc b/vendor/riscv-isa-sim/fesvr/option_parser.cc new file mode 100644 index 00000000..72daec40 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/option_parser.cc @@ -0,0 +1,51 @@ +// See LICENSE for license details. + +#include "option_parser.h" +#include +#include +#include +#include + +void option_parser_t::option(char c, const char* s, int arg, std::function action) +{ + opts.push_back(option_t(c, s, arg, action)); +} + +const char* const* option_parser_t::parse(const char* const* argv0) +{ + assert(argv0); + const char* const* argv = argv0 + 1; + for (const char* opt; (opt = *argv) != NULL && opt[0] == '-'; argv++) + { + bool found = false; + for (auto it = opts.begin(); !found && it != opts.end(); it++) + { + size_t slen = it->str ? strlen(it->str) : 0; + bool chr_match = opt[1] != '-' && it->chr && opt[1] == it->chr; + bool str_match = opt[1] == '-' && slen && strncmp(opt+2, it->str, slen) == 0; + if (chr_match || (str_match && (opt[2+slen] == '=' || opt[2+slen] == '\0'))) + { + const char* optarg = + chr_match ? (opt[2] ? &opt[2] : NULL) : + opt[2+slen] ? &opt[3+slen] : + it->arg ? *(++argv) : NULL; + if (optarg && !it->arg) + error("no argument allowed for option", *argv0, opt); + if (!optarg && it->arg) + error("argument required for option", *argv0, opt); + it->func(optarg); + found = true; + } + } + if (!found) + error("unrecognized option", *argv0, opt); + } + return argv; +} + +void option_parser_t::error(const char* msg, const char* argv0, const char* arg) +{ + fprintf(stderr, "%s: %s %s\n", argv0, msg, arg ? arg : ""); + if (helpmsg) helpmsg(); + exit(1); +} diff --git a/vendor/riscv-isa-sim/fesvr/option_parser.h b/vendor/riscv-isa-sim/fesvr/option_parser.h new file mode 100644 index 00000000..b2cb8edf --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/option_parser.h @@ -0,0 +1,31 @@ +// See LICENSE for license details. 
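+//
+// Usage sketch (hypothetical option names, not taken from this codebase):
+//
+//   option_parser_t parser;
+//   parser.help(&print_usage);                 // callback invoked on parse errors
+//   parser.option('m', "mem-size", 1,          // 1: option takes an argument
+//                 [&](const char* s){ mem_mb = atoi(s); });
+//   parser.option(0, "verbose", 0,             // 0: flag, no argument allowed
+//                 [&](const char* s){ verbose = true; });
+//   const char* const* rest = parser.parse(argv);  // returns first non-option arg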
+ +#ifndef _OPTION_PARSER_H +#define _OPTION_PARSER_H + +#include +#include + +class option_parser_t +{ + public: + option_parser_t() : helpmsg(0) {} + void help(void (*helpm)(void)) { helpmsg = helpm; } + void option(char c, const char* s, int arg, std::function action); + const char* const* parse(const char* const* argv0); + private: + struct option_t + { + char chr; + const char* str; + int arg; + std::function func; + option_t(char chr, const char* str, int arg, std::function func) + : chr(chr), str(str), arg(arg), func(func) {} + }; + std::vector opts; + void (*helpmsg)(void); + void error(const char* msg, const char* argv0, const char* arg); +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/rfb.cc b/vendor/riscv-isa-sim/fesvr/rfb.cc new file mode 100644 index 00000000..2594a1b8 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/rfb.cc @@ -0,0 +1,230 @@ +#include "rfb.h" +#include "memif.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +using namespace std::placeholders; + +rfb_t::rfb_t(int display) + : sockfd(-1), afd(-1), + memif(0), addr(0), width(0), height(0), bpp(0), display(display), + thread(pthread_self()), fb1(0), fb2(0), read_pos(0), + lock(PTHREAD_MUTEX_INITIALIZER) +{ + register_command(0, std::bind(&rfb_t::handle_configure, this, _1), "configure"); + register_command(1, std::bind(&rfb_t::handle_set_address, this, _1), "set_address"); +} + +void* rfb_thread_main(void* arg) +{ + ((rfb_t*)arg)->thread_main(); + return 0; +} + +void rfb_t::thread_main() +{ + pthread_mutex_lock(&lock); + + int port = 5900 + display; + sockfd = socket(PF_INET, SOCK_STREAM, 0); + if (sockfd < 0) + throw std::runtime_error("could not acquire tcp socket"); + + struct sockaddr_in saddr, caddr; + saddr.sin_family = AF_INET; + saddr.sin_addr.s_addr = INADDR_ANY; + saddr.sin_port = htons(port); + if (bind(sockfd, (struct sockaddr*)&saddr, sizeof(saddr)) < 0) + throw std::runtime_error("could not bind to port " + std::to_string(port)); + if (listen(sockfd, 0) < 0) + throw std::runtime_error("could not listen on port " + std::to_string(port)); + + socklen_t clen = sizeof(caddr); + afd = accept(sockfd, (struct sockaddr*)&caddr, &clen); + if (afd < 0) + throw std::runtime_error("could not accept connection"); + + std::string version = "RFB 003.003\n"; + write(version); + if (read() != version) + throw std::runtime_error("bad client version"); + + write(str(uint32_t(htonl(1)))); + + read(); // clientinit + + std::string serverinit; + serverinit += str(uint16_t(htons(width))); + serverinit += str(uint16_t(htons(height))); + serverinit += pixel_format(); + std::string name = "RISC-V"; + serverinit += str(uint32_t(htonl(name.length()))); + serverinit += name; + write(serverinit); + + pthread_mutex_unlock(&lock); + + while (memif == NULL) + sched_yield(); + + while (memif != NULL) + { + std::string s = read(); + if (s.length() < 4) + break; //throw std::runtime_error("bad command"); + + switch (s[0]) + { + case 0: set_pixel_format(s); break; + case 2: set_encodings(s); break; + case 3: break; + } + } + + pthread_mutex_lock(&lock); + close(afd); + close(sockfd); + afd = -1; + sockfd = -1; + pthread_mutex_unlock(&lock); + + thread_main(); +} + +rfb_t::~rfb_t() +{ + memif = 0; + if (!pthread_equal(pthread_self(), thread)) + pthread_join(thread, 0); + delete [] fb1; + delete [] fb2; +} + +void rfb_t::set_encodings(const std::string& s) +{ + uint16_t n = htons(*(uint16_t*)&s[2]); + for (size_t b = s.length(); b < 4U+4U*n; b += read().length()); +} + +void 
rfb_t::set_pixel_format(const std::string& s) +{ + if (s.length() != 20 || s.substr(4, 16) != pixel_format()) + throw std::runtime_error("bad pixel format"); +} + +void rfb_t::fb_update(const std::string& s) +{ + std::string u; + u += str(uint8_t(0)); + u += str(uint8_t(0)); + u += str(uint16_t(htons(1))); + u += str(uint16_t(htons(0))); + u += str(uint16_t(htons(0))); + u += str(uint16_t(htons(width))); + u += str(uint16_t(htons(height))); + u += str(uint32_t(htonl(0))); + u += std::string((char*)fb1, fb_bytes()); + + try + { + write(u); + } + catch (std::runtime_error& e) + { + } +} + +void rfb_t::tick() +{ + if (fb_bytes() == 0 || memif == NULL) + return; + + memif->read(addr + read_pos, FB_ALIGN, const_cast(fb2 + read_pos)); + read_pos = (read_pos + FB_ALIGN) % fb_bytes(); + if (read_pos == 0) + { + std::swap(fb1, fb2); + if (pthread_mutex_trylock(&lock) == 0) + { + fb_update(""); + pthread_mutex_unlock(&lock); + } + } +} + +std::string rfb_t::pixel_format() +{ + int red_bits = 8, green_bits = 8, blue_bits = 8; + int bpp = red_bits + green_bits + blue_bits; + while (bpp & (bpp-1)) bpp++; + + std::string fmt; + fmt += str(uint8_t(bpp)); + fmt += str(uint8_t(red_bits + green_bits + blue_bits)); + fmt += str(uint8_t(0)); // little-endian + fmt += str(uint8_t(1)); // true color + fmt += str(uint16_t(htons((1<> 16; + + bpp = cmd.payload() >> 32; + if (bpp != 32) + throw std::runtime_error("rfb requires 32 bpp true color"); + + if (fb_bytes() % FB_ALIGN != 0) + throw std::runtime_error("rfb size must be a multiple of " + std::to_string(FB_ALIGN)); + + fb1 = new char[fb_bytes()]; + fb2 = new char[fb_bytes()]; + if (pthread_create(&thread, 0, rfb_thread_main, this)) + throw std::runtime_error("could not create thread"); + cmd.respond(1); +} + +void rfb_t::handle_set_address(command_t cmd) +{ + addr = cmd.payload(); + if (addr % FB_ALIGN != 0) + throw std::runtime_error("rfb address must be " + std::to_string(FB_ALIGN) + "-byte aligned"); + memif = &cmd.memif(); + cmd.respond(1); +} diff --git a/vendor/riscv-isa-sim/fesvr/rfb.h b/vendor/riscv-isa-sim/fesvr/rfb.h new file mode 100644 index 00000000..263663a2 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/rfb.h @@ -0,0 +1,53 @@ +#ifndef _RFB_H +#define _RFB_H + +#include "device.h" +#include "memif.h" +#include + +// remote frame buffer +class rfb_t : public device_t +{ + public: + rfb_t(int display = 0); + ~rfb_t(); + void tick(); + std::string name() { return "RISC-V"; } + const char* identity() { return "rfb"; } + + private: + template + std::string str(T x) + { + return std::string((char*)&x, sizeof(x)); + } + size_t fb_bytes() { return size_t(width) * height * bpp/8; } + void thread_main(); + friend void* rfb_thread_main(void*); + std::string pixel_format(); + void fb_update(const std::string& s); + void set_encodings(const std::string& s); + void set_pixel_format(const std::string& s); + void write(const std::string& s); + std::string read(); + void handle_configure(command_t cmd); + void handle_set_address(command_t cmd); + + int sockfd; + int afd; + memif_t* memif; + reg_t addr; + uint16_t width; + uint16_t height; + uint16_t bpp; + int display; + pthread_t thread; + volatile char* volatile fb1; + volatile char* volatile fb2; + size_t read_pos; + pthread_mutex_t lock; + + static const int FB_ALIGN = 256; +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/syscall.cc b/vendor/riscv-isa-sim/fesvr/syscall.cc new file mode 100644 index 00000000..ab7fc3b4 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/syscall.cc @@ -0,0 +1,502 @@ +// See 
LICENSE for license details. + +#include "syscall.h" +#include "htif.h" +#include "byteorder.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +using namespace std::placeholders; + +#define RISCV_AT_FDCWD -100 + +struct riscv_stat +{ + target_endian dev; + target_endian ino; + target_endian mode; + target_endian nlink; + target_endian uid; + target_endian gid; + target_endian rdev; + target_endian __pad1; + target_endian size; + target_endian blksize; + target_endian __pad2; + target_endian blocks; + target_endian atime; + target_endian __pad3; + target_endian mtime; + target_endian __pad4; + target_endian ctime; + target_endian __pad5; + target_endian __unused4; + target_endian __unused5; + + riscv_stat(const struct stat& s, htif_t* htif) + : dev(htif->to_target(s.st_dev)), + ino(htif->to_target(s.st_ino)), + mode(htif->to_target(s.st_mode)), + nlink(htif->to_target(s.st_nlink)), + uid(htif->to_target(s.st_uid)), + gid(htif->to_target(s.st_gid)), + rdev(htif->to_target(s.st_rdev)), __pad1(), + size(htif->to_target(s.st_size)), + blksize(htif->to_target(s.st_blksize)), __pad2(), + blocks(htif->to_target(s.st_blocks)), + atime(htif->to_target(s.st_atime)), __pad3(), + mtime(htif->to_target(s.st_mtime)), __pad4(), + ctime(htif->to_target(s.st_ctime)), __pad5(), + __unused4(), __unused5() {} +}; + + +struct riscv_statx_timestamp { + target_endian tv_sec; + target_endian tv_nsec; + target_endian __reserved; +}; + +#ifdef HAVE_STATX +struct riscv_statx +{ + target_endian mask; + target_endian blksize; + target_endian attributes; + target_endian nlink; + target_endian uid; + target_endian gid; + target_endian mode; + target_endian __spare0[1]; + target_endian ino; + target_endian size; + target_endian blocks; + target_endian attributes_mask; + struct riscv_statx_timestamp atime; + struct riscv_statx_timestamp btime; + struct riscv_statx_timestamp ctime; + struct riscv_statx_timestamp mtime; + target_endian rdev_major; + target_endian rdev_minor; + target_endian dev_major; + target_endian dev_minor; +#ifdef HAVE_STATX_MNT_ID + target_endian mnt_id; + target_endian __spare2; + target_endian __spare3[12]; +#else + target_endian __spare2[14]; +#endif + + riscv_statx(const struct statx& s, htif_t* htif) + : mask(htif->to_target(s.stx_mask)), + blksize(htif->to_target(s.stx_blksize)), + attributes(htif->to_target(s.stx_attributes)), + nlink(htif->to_target(s.stx_nlink)), + uid(htif->to_target(s.stx_uid)), + gid(htif->to_target(s.stx_gid)), + mode(htif->to_target(s.stx_mode)), __spare0(), + ino(htif->to_target(s.stx_ino)), + size(htif->to_target(s.stx_size)), + blocks(htif->to_target(s.stx_blocks)), + attributes_mask(htif->to_target(s.stx_attributes_mask)), + atime { + htif->to_target(s.stx_atime.tv_sec), + htif->to_target(s.stx_atime.tv_nsec) + }, + btime { + htif->to_target(s.stx_btime.tv_sec), + htif->to_target(s.stx_btime.tv_nsec) + }, + ctime { + htif->to_target(s.stx_ctime.tv_sec), + htif->to_target(s.stx_ctime.tv_nsec) + }, + mtime { + htif->to_target(s.stx_mtime.tv_sec), + htif->to_target(s.stx_mtime.tv_nsec) + }, + rdev_major(htif->to_target(s.stx_rdev_major)), + rdev_minor(htif->to_target(s.stx_rdev_minor)), + dev_major(htif->to_target(s.stx_dev_major)), + dev_minor(htif->to_target(s.stx_dev_minor)), +#ifdef HAVE_STATX_MNT_ID + mnt_id(htif->to_target(s.stx_mnt_id)), + __spare2(), __spare3() +#else + __spare2() +#endif + {} +}; +#endif + +syscall_t::syscall_t(htif_t* htif) + : htif(htif), memif(&htif->memif()), table(2048) +{ + table[17] = 
&syscall_t::sys_getcwd; + table[25] = &syscall_t::sys_fcntl; + table[34] = &syscall_t::sys_mkdirat; + table[35] = &syscall_t::sys_unlinkat; + table[37] = &syscall_t::sys_linkat; + table[38] = &syscall_t::sys_renameat; + table[46] = &syscall_t::sys_ftruncate; + table[48] = &syscall_t::sys_faccessat; + table[49] = &syscall_t::sys_chdir; + table[56] = &syscall_t::sys_openat; + table[57] = &syscall_t::sys_close; + table[62] = &syscall_t::sys_lseek; + table[63] = &syscall_t::sys_read; + table[64] = &syscall_t::sys_write; + table[67] = &syscall_t::sys_pread; + table[68] = &syscall_t::sys_pwrite; + table[79] = &syscall_t::sys_fstatat; + table[80] = &syscall_t::sys_fstat; + table[93] = &syscall_t::sys_exit; + table[291] = &syscall_t::sys_statx; + table[1039] = &syscall_t::sys_lstat; + table[2011] = &syscall_t::sys_getmainvars; + + register_command(0, std::bind(&syscall_t::handle_syscall, this, _1), "syscall"); + + int stdin_fd = dup(0), stdout_fd0 = dup(1), stdout_fd1 = dup(1); + if (stdin_fd < 0 || stdout_fd0 < 0 || stdout_fd1 < 0) + throw std::runtime_error("could not dup stdin/stdout"); + + fds.alloc(stdin_fd); // stdin -> stdin + fds.alloc(stdout_fd0); // stdout -> stdout + fds.alloc(stdout_fd1); // stderr -> stdout +} + +std::string syscall_t::do_chroot(const char* fn) +{ + if (!chroot.empty() && *fn == '/') + return chroot + fn; + return fn; +} + +std::string syscall_t::undo_chroot(const char* fn) +{ + if (chroot.empty()) + return fn; + if (strncmp(fn, chroot.c_str(), chroot.size()) == 0 + && (chroot.back() == '/' || fn[chroot.size()] == '/')) + return fn + chroot.size() - (chroot.back() == '/'); + return "/"; +} + +void syscall_t::handle_syscall(command_t cmd) +{ + if (cmd.payload() & 1) // test pass/fail + { + htif->exitcode = cmd.payload(); + if (htif->exit_code()) + std::cerr << "*** FAILED *** (tohost = " << htif->exit_code() << ")" << std::endl; + return; + } + else // proxied system call + dispatch(cmd.payload()); + + cmd.respond(1); +} + +reg_t syscall_t::sys_exit(reg_t code, reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + htif->exitcode = code << 1 | 1; + return 0; +} + +static reg_t sysret_errno(sreg_t ret) +{ + return ret == -1 ? 
-errno : ret; +} + +reg_t syscall_t::sys_read(reg_t fd, reg_t pbuf, reg_t len, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector buf(len); + ssize_t ret = read(fds.lookup(fd), buf.data(), len); + reg_t ret_errno = sysret_errno(ret); + if (ret > 0) + memif->write(pbuf, ret, buf.data()); + return ret_errno; +} + +reg_t syscall_t::sys_pread(reg_t fd, reg_t pbuf, reg_t len, reg_t off, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector buf(len); + ssize_t ret = pread(fds.lookup(fd), buf.data(), len, off); + reg_t ret_errno = sysret_errno(ret); + if (ret > 0) + memif->write(pbuf, ret, buf.data()); + return ret_errno; +} + +reg_t syscall_t::sys_write(reg_t fd, reg_t pbuf, reg_t len, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector buf(len); + memif->read(pbuf, len, buf.data()); + reg_t ret = sysret_errno(write(fds.lookup(fd), buf.data(), len)); + return ret; +} + +reg_t syscall_t::sys_pwrite(reg_t fd, reg_t pbuf, reg_t len, reg_t off, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector buf(len); + memif->read(pbuf, len, buf.data()); + reg_t ret = sysret_errno(pwrite(fds.lookup(fd), buf.data(), len, off)); + return ret; +} + +reg_t syscall_t::sys_close(reg_t fd, reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + if (close(fds.lookup(fd)) < 0) + return sysret_errno(-1); + fds.dealloc(fd); + return 0; +} + +reg_t syscall_t::sys_lseek(reg_t fd, reg_t ptr, reg_t dir, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + return sysret_errno(lseek(fds.lookup(fd), ptr, dir)); +} + +reg_t syscall_t::sys_fstat(reg_t fd, reg_t pbuf, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + struct stat buf; + reg_t ret = sysret_errno(fstat(fds.lookup(fd), &buf)); + if (ret != (reg_t)-1) + { + riscv_stat rbuf(buf, htif); + memif->write(pbuf, sizeof(rbuf), &rbuf); + } + return ret; +} + +reg_t syscall_t::sys_fcntl(reg_t fd, reg_t cmd, reg_t arg, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + return sysret_errno(fcntl(fds.lookup(fd), cmd, arg)); +} + +reg_t syscall_t::sys_ftruncate(reg_t fd, reg_t len, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + return sysret_errno(ftruncate(fds.lookup(fd), len)); +} + +reg_t syscall_t::sys_lstat(reg_t pname, reg_t len, reg_t pbuf, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector name(len); + memif->read(pname, len, name.data()); + + struct stat buf; + reg_t ret = sysret_errno(lstat(do_chroot(name.data()).c_str(), &buf)); + if (ret != (reg_t)-1) + { + riscv_stat rbuf(buf, htif); + memif->write(pbuf, sizeof(rbuf), &rbuf); + } + return ret; +} + +reg_t syscall_t::sys_statx(reg_t fd, reg_t pname, reg_t len, reg_t flags, reg_t mask, reg_t pbuf, reg_t a6) +{ +#ifndef HAVE_STATX + return -ENOSYS; +#else + std::vector name(len); + memif->read(pname, len, name.data()); + + struct statx buf; + reg_t ret = sysret_errno(statx(fds.lookup(fd), do_chroot(name.data()).c_str(), flags, mask, &buf)); + if (ret != (reg_t)-1) + { + riscv_statx rbuf(buf, htif); + memif->write(pbuf, sizeof(rbuf), &rbuf); + } + return ret; +#endif +} + +#define AT_SYSCALL(syscall, fd, name, ...) \ + (syscall(fds.lookup(fd), int(fd) == RISCV_AT_FDCWD ? 
do_chroot(name).c_str() : (name), __VA_ARGS__)) + +reg_t syscall_t::sys_openat(reg_t dirfd, reg_t pname, reg_t len, reg_t flags, reg_t mode, reg_t a5, reg_t a6) +{ + std::vector name(len); + memif->read(pname, len, name.data()); + int fd = sysret_errno(AT_SYSCALL(openat, dirfd, name.data(), flags, mode)); + if (fd < 0) + return sysret_errno(-1); + return fds.alloc(fd); +} + +reg_t syscall_t::sys_fstatat(reg_t dirfd, reg_t pname, reg_t len, reg_t pbuf, reg_t flags, reg_t a5, reg_t a6) +{ + std::vector name(len); + memif->read(pname, len, name.data()); + + struct stat buf; + reg_t ret = sysret_errno(AT_SYSCALL(fstatat, dirfd, name.data(), &buf, flags)); + if (ret != (reg_t)-1) + { + riscv_stat rbuf(buf, htif); + memif->write(pbuf, sizeof(rbuf), &rbuf); + } + return ret; +} + +reg_t syscall_t::sys_faccessat(reg_t dirfd, reg_t pname, reg_t len, reg_t mode, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector name(len); + memif->read(pname, len, name.data()); + return sysret_errno(AT_SYSCALL(faccessat, dirfd, name.data(), mode, 0)); +} + +reg_t syscall_t::sys_renameat(reg_t odirfd, reg_t popath, reg_t olen, reg_t ndirfd, reg_t pnpath, reg_t nlen, reg_t a6) +{ + std::vector opath(olen), npath(nlen); + memif->read(popath, olen, opath.data()); + memif->read(pnpath, nlen, npath.data()); + return sysret_errno(renameat(fds.lookup(odirfd), int(odirfd) == RISCV_AT_FDCWD ? do_chroot(opath.data()).c_str() : opath.data(), + fds.lookup(ndirfd), int(ndirfd) == RISCV_AT_FDCWD ? do_chroot(npath.data()).c_str() : npath.data())); +} + +reg_t syscall_t::sys_linkat(reg_t odirfd, reg_t poname, reg_t olen, reg_t ndirfd, reg_t pnname, reg_t nlen, reg_t flags) +{ + std::vector oname(olen), nname(nlen); + memif->read(poname, olen, oname.data()); + memif->read(pnname, nlen, nname.data()); + return sysret_errno(linkat(fds.lookup(odirfd), int(odirfd) == RISCV_AT_FDCWD ? do_chroot(oname.data()).c_str() : oname.data(), + fds.lookup(ndirfd), int(ndirfd) == RISCV_AT_FDCWD ? 
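/* Worked example (hypothetical sandbox path) for the do_chroot()/undo_chroot()
   pair used by the AT_SYSCALL() macro above, assuming set_chroot("/srv/rootfs")
   was called:

     do_chroot("/etc/passwd")              -> "/srv/rootfs/etc/passwd"
     do_chroot("data/log.txt")             -> "data/log.txt"   (relative paths pass through)
     undo_chroot("/srv/rootfs/etc/passwd") -> "/etc/passwd"
     undo_chroot("/home/user")             -> "/"              (outside the sandbox)
*/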
do_chroot(nname.data()).c_str() : nname.data(), + flags)); +} + +reg_t syscall_t::sys_unlinkat(reg_t dirfd, reg_t pname, reg_t len, reg_t flags, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector name(len); + memif->read(pname, len, name.data()); + return sysret_errno(AT_SYSCALL(unlinkat, dirfd, name.data(), flags)); +} + +reg_t syscall_t::sys_mkdirat(reg_t dirfd, reg_t pname, reg_t len, reg_t mode, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector name(len); + memif->read(pname, len, name.data()); + return sysret_errno(AT_SYSCALL(mkdirat, dirfd, name.data(), mode)); +} + +reg_t syscall_t::sys_getcwd(reg_t pbuf, reg_t size, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector buf(size); + char* ret = getcwd(buf.data(), size); + if (ret == NULL) + return sysret_errno(-1); + std::string tmp = undo_chroot(buf.data()); + if (size <= tmp.size()) + return -ENOMEM; + memif->write(pbuf, tmp.size() + 1, tmp.data()); + return tmp.size() + 1; +} + +reg_t syscall_t::sys_getmainvars(reg_t pbuf, reg_t limit, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + std::vector args = htif->target_args(); + std::vector> words(args.size() + 3); + words[0] = htif->to_target(args.size()); + words[args.size()+1] = target_endian::zero; // argv[argc] = NULL + words[args.size()+2] = target_endian::zero; // envp[0] = NULL + + size_t sz = (args.size() + 3) * sizeof(words[0]); + for (size_t i = 0; i < args.size(); i++) + { + words[i+1] = htif->to_target(sz + pbuf); + sz += args[i].length() + 1; + } + + std::vector bytes(sz); + memcpy(bytes.data(), words.data(), sizeof(words[0]) * words.size()); + for (size_t i = 0; i < args.size(); i++) + strcpy(&bytes[htif->from_target(words[i+1]) - pbuf], args[i].c_str()); + + if (bytes.size() > limit) + return -ENOMEM; + + memif->write(pbuf, bytes.size(), bytes.data()); + return 0; +} + +reg_t syscall_t::sys_chdir(reg_t path, reg_t a1, reg_t a2, reg_t a3, reg_t a4, reg_t a5, reg_t a6) +{ + size_t size = 0; + while (memif->read_uint8(path + size++)) + ; + std::vector buf(size); + for (size_t offset = 0;; offset++) + { + buf[offset] = memif->read_uint8(path + offset); + if (!buf[offset]) + break; + } + return sysret_errno(chdir(buf.data())); +} + +void syscall_t::dispatch(reg_t mm) +{ + target_endian magicmem[8]; + memif->read(mm, sizeof(magicmem), magicmem); + + reg_t n = htif->from_target(magicmem[0]); + if (n >= table.size() || !table[n]) + throw std::runtime_error("bad syscall #" + std::to_string(n)); + + magicmem[0] = htif->to_target((this->*table[n])(htif->from_target(magicmem[1]), htif->from_target(magicmem[2]), htif->from_target(magicmem[3]), htif->from_target(magicmem[4]), htif->from_target(magicmem[5]), htif->from_target(magicmem[6]), htif->from_target(magicmem[7]))); + + memif->write(mm, sizeof(magicmem), magicmem); +} + +reg_t fds_t::alloc(int fd) +{ + reg_t i; + for (i = 0; i < fds.size(); i++) + if (fds[i] == -1) + break; + + if (i == fds.size()) + fds.resize(i+1); + + fds[i] = fd; + return i; +} + +void fds_t::dealloc(reg_t fd) +{ + fds[fd] = -1; +} + +int fds_t::lookup(reg_t fd) +{ + if (int(fd) == RISCV_AT_FDCWD) + return AT_FDCWD; + return fd >= fds.size() ? 
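/* Illustrative sketch (target-side view; the struct name is made up and the
   fromhost handshake is simplified): dispatch() above reads eight
   target-endian words (64-bit in the upstream layout) at the address passed
   through tohost -- word 0 holds the syscall number on entry and the return
   value on exit, words 1..7 hold the arguments.

   struct magic_mem { volatile uint64_t n_or_ret; volatile uint64_t arg[7]; };

   // Proxying write(1, buf, len) through the table above (table[64] == sys_write):
   //   mm.n_or_ret = 64;
   //   mm.arg[0]   = 1;                // fd
   //   mm.arg[1]   = (uintptr_t)buf;   // target buffer address
   //   mm.arg[2]   = len;
   //   store &mm (8-byte aligned, so the low bit is 0) to tohost,
   //   wait for the host's acknowledgement on fromhost,
   //   then read the result back from mm.n_or_ret.
*/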
-1 : fds[fd]; +} + +void syscall_t::set_chroot(const char* where) +{ + char buf1[PATH_MAX], buf2[PATH_MAX]; + + if (getcwd(buf1, sizeof(buf1)) == NULL + || chdir(where) != 0 + || getcwd(buf2, sizeof(buf2)) == NULL + || chdir(buf1) != 0) + { + fprintf(stderr, "could not chroot to %s\n", where); + exit(-1); + } + + chroot = buf2; +} diff --git a/vendor/riscv-isa-sim/fesvr/syscall.h b/vendor/riscv-isa-sim/fesvr/syscall.h new file mode 100644 index 00000000..4915efd6 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/syscall.h @@ -0,0 +1,73 @@ +// See LICENSE for license details. + +#ifndef __SYSCALL_H +#define __SYSCALL_H + +#include "device.h" +#include "memif.h" +#include +#include + +class syscall_t; +typedef reg_t (syscall_t::*syscall_func_t)(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + +class htif_t; +class memif_t; + +class fds_t +{ + public: + reg_t alloc(int fd); + void dealloc(reg_t fd); + int lookup(reg_t fd); + private: + std::vector fds; +}; + +class syscall_t : public device_t +{ + public: + syscall_t(htif_t*); + + void set_chroot(const char* where); + + private: + const char* identity() { return "syscall_proxy"; } + + htif_t* htif; + memif_t* memif; + std::vector table; + fds_t fds; + + void handle_syscall(command_t cmd); + void dispatch(addr_t mm); + + std::string chroot; + std::string do_chroot(const char* fn); + std::string undo_chroot(const char* fn); + + reg_t sys_exit(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_openat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_read(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_pread(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_write(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_pwrite(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_close(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_lseek(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_fstat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_lstat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_statx(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_fstatat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_faccessat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_fcntl(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_ftruncate(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_renameat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_linkat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_unlinkat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_mkdirat(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_getcwd(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_getmainvars(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); + reg_t sys_chdir(reg_t, reg_t, reg_t, reg_t, reg_t, reg_t, reg_t); +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/term.cc b/vendor/riscv-isa-sim/fesvr/term.cc new file mode 100644 index 00000000..c4cba0c0 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/term.cc @@ -0,0 +1,53 @@ +#include "term.h" +#include +#include +#include +#include +#include + +class canonical_termios_t +{ + public: + canonical_termios_t() + : restore_tios(false) + { + if (tcgetattr(0, &old_tios) == 0) + { + struct termios new_tios = old_tios; + new_tios.c_lflag &= ~(ICANON | ECHO); + if (tcsetattr(0, TCSANOW, &new_tios) == 0) + restore_tios = true; + } + } + + ~canonical_termios_t() + { + if (restore_tios) + 
tcsetattr(0, TCSANOW, &old_tios); + } + private: + struct termios old_tios; + bool restore_tios; +}; + +static canonical_termios_t tios; // exit() will clean up for us + +int canonical_terminal_t::read() +{ + struct pollfd pfd; + pfd.fd = 0; + pfd.events = POLLIN; + int ret = poll(&pfd, 1, 0); + if (ret <= 0 || !(pfd.revents & POLLIN)) + return -1; + + unsigned char ch; + ret = ::read(0, &ch, 1); + return ret <= 0 ? -1 : ch; +} + +void canonical_terminal_t::write(char ch) +{ + if (::write(1, &ch, 1) != 1) + abort(); +} diff --git a/vendor/riscv-isa-sim/fesvr/term.h b/vendor/riscv-isa-sim/fesvr/term.h new file mode 100644 index 00000000..7a2c22fc --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/term.h @@ -0,0 +1,11 @@ +#ifndef _TERM_H +#define _TERM_H + +class canonical_terminal_t +{ + public: + static int read(); + static void write(char); +}; + +#endif diff --git a/vendor/riscv-isa-sim/fesvr/tsi.cc b/vendor/riscv-isa-sim/fesvr/tsi.cc new file mode 100644 index 00000000..5ccafc4b --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/tsi.cc @@ -0,0 +1,115 @@ +#include "tsi.h" +#include +#include + +#define NHARTS_MAX 16 + +void tsi_t::host_thread(void *arg) +{ + tsi_t *tsi = static_cast(arg); + tsi->run(); + + while (true) + tsi->target->switch_to(); +} + +tsi_t::tsi_t(int argc, char** argv) : htif_t(argc, argv) +{ + target = context_t::current(); + host.init(host_thread, this); +} + +tsi_t::~tsi_t(void) +{ +} + +#define MSIP_BASE 0x2000000 + +// Interrupt core 0 to make it start executing the program in DRAM +void tsi_t::reset() +{ + uint32_t one = 1; + + write_chunk(MSIP_BASE, sizeof(uint32_t), &one); +} + +void tsi_t::push_addr(addr_t addr) +{ + for (int i = 0; i < SAI_ADDR_CHUNKS; i++) { + in_data.push_back(addr & 0xffffffff); + addr = addr >> 32; + } +} + +void tsi_t::push_len(addr_t len) +{ + for (int i = 0; i < SAI_LEN_CHUNKS; i++) { + in_data.push_back(len & 0xffffffff); + len = len >> 32; + } +} + +void tsi_t::read_chunk(addr_t taddr, size_t nbytes, void* dst) +{ + uint32_t *result = static_cast(dst); + size_t len = nbytes / sizeof(uint32_t); + + in_data.push_back(SAI_CMD_READ); + push_addr(taddr); + push_len(len - 1); + + for (size_t i = 0; i < len; i++) { + while (out_data.empty()) + switch_to_target(); + result[i] = out_data.front(); + out_data.pop_front(); + } +} + +void tsi_t::write_chunk(addr_t taddr, size_t nbytes, const void* src) +{ + const uint32_t *src_data = static_cast(src); + size_t len = nbytes / sizeof(uint32_t); + + in_data.push_back(SAI_CMD_WRITE); + push_addr(taddr); + push_len(len - 1); + + in_data.insert(in_data.end(), src_data, src_data + len); +} + +void tsi_t::send_word(uint32_t word) +{ + out_data.push_back(word); +} + +uint32_t tsi_t::recv_word(void) +{ + uint32_t word = in_data.front(); + in_data.pop_front(); + return word; +} + +bool tsi_t::data_available(void) +{ + return !in_data.empty(); +} + +void tsi_t::switch_to_host(void) +{ + host.switch_to(); +} + +void tsi_t::switch_to_target(void) +{ + target->switch_to(); +} + +void tsi_t::tick(bool out_valid, uint32_t out_bits, bool in_ready) +{ + if (out_valid && out_ready()) + out_data.push_back(out_bits); + + if (in_valid() && in_ready) + in_data.pop_front(); +} diff --git a/vendor/riscv-isa-sim/fesvr/tsi.h b/vendor/riscv-isa-sim/fesvr/tsi.h new file mode 100644 index 00000000..825a3a00 --- /dev/null +++ b/vendor/riscv-isa-sim/fesvr/tsi.h @@ -0,0 +1,57 @@ +#ifndef __SAI_H +#define __SAI_H + +#include "htif.h" +#include "context.h" + +#include +#include +#include +#include + +#define SAI_CMD_READ 0 +#define 
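/* Worked example of the word stream produced by read_chunk() above
   (SAI_ADDR_CHUNKS == SAI_LEN_CHUNKS == 2; 32-bit words in the upstream
   layout, low word first): a 16-byte read at 0x80000000 is encoded as

     in_data:  SAI_CMD_READ, 0x80000000, 0x00000000, 3, 0
                              ^addr lo    ^addr hi    ^(len-1) in words

   after which the target is expected to stream back 4 data words, which
   read_chunk() collects from out_data.
*/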
SAI_CMD_WRITE 1 + +#define SAI_ADDR_CHUNKS 2 +#define SAI_LEN_CHUNKS 2 + +class tsi_t : public htif_t +{ + public: + tsi_t(int argc, char** argv); + virtual ~tsi_t(); + + bool data_available(); + void send_word(uint32_t word); + uint32_t recv_word(); + void switch_to_host(); + + uint32_t in_bits() { return in_data.front(); } + bool in_valid() { return !in_data.empty(); } + bool out_ready() { return true; } + void tick(bool out_valid, uint32_t out_bits, bool in_ready); + + protected: + void reset() override; + void read_chunk(addr_t taddr, size_t nbytes, void* dst) override; + void write_chunk(addr_t taddr, size_t nbytes, const void* src) override; + void switch_to_target(); + + size_t chunk_align() override { return 4; } + size_t chunk_max_size() override { return 1024; } + + int get_ipi_addrs(addr_t *addrs); + + private: + context_t host; + context_t* target; + std::deque in_data; + std::deque out_data; + + void push_addr(addr_t addr); + void push_len(addr_t len); + + static void host_thread(void *tsi); +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv-disasm.pc.in b/vendor/riscv-isa-sim/riscv-disasm.pc.in new file mode 100644 index 00000000..8e022e93 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv-disasm.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@prefix@ +libdir=${prefix}/@libdir@ +includedir=${prefix}/@includedir@ + +Name: riscv-disasm +Description: RISC-V disassembler +Version: git +Libs: -Wl,-rpath,${libdir} -L${libdir} -ldisasm +Cflags: -I${includedir} +URL: http://riscv.org/download.html#tab_disasm diff --git a/vendor/riscv-isa-sim/riscv-fesvr.pc.in b/vendor/riscv-isa-sim/riscv-fesvr.pc.in new file mode 100644 index 00000000..efd7eed1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv-fesvr.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@prefix@ +libdir=${prefix}/@libdir@ +includedir=${prefix}/@includedir@ + +Name: riscv-fesvr +Description: RISC-V front-end server +Version: git +Libs: -Wl,-rpath,${libdir} -L${libdir} -lfesvr +Cflags: -I${includedir} +URL: http://riscv.org/download.html#tab_fesvr diff --git a/vendor/riscv-isa-sim/riscv/abstract_device.h b/vendor/riscv-isa-sim/riscv/abstract_device.h new file mode 100644 index 00000000..559c64f6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/abstract_device.h @@ -0,0 +1,15 @@ +#ifndef _RISCV_ABSTRACT_DEVICE_H +#define _RISCV_ABSTRACT_DEVICE_H + +#include "decode.h" +#include +#include + +class abstract_device_t { + public: + virtual bool load(reg_t addr, size_t len, uint8_t* bytes) = 0; + virtual bool store(reg_t addr, size_t len, const uint8_t* bytes) = 0; + virtual ~abstract_device_t() {} +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/arith.h b/vendor/riscv-isa-sim/riscv/arith.h new file mode 100644 index 00000000..9e0c2f74 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/arith.h @@ -0,0 +1,216 @@ +// See LICENSE for license details. + +#ifndef _RISCV_ARITH_H +#define _RISCV_ARITH_H + +#include +#include +#include +#include + +inline uint64_t mulhu(uint64_t a, uint64_t b) +{ + uint64_t t; + uint32_t y1, y2, y3; + uint64_t a0 = (uint32_t)a, a1 = a >> 32; + uint64_t b0 = (uint32_t)b, b1 = b >> 32; + + t = a1*b0 + ((a0*b0) >> 32); + y1 = t; + y2 = t >> 32; + + t = a0*b1 + y1; + y1 = t; + + t = a1*b1 + y2 + (t >> 32); + y2 = t; + y3 = t >> 32; + + return ((uint64_t)y3 << 32) | y2; +} + +inline int64_t mulh(int64_t a, int64_t b) +{ + int negate = (a < 0) != (b < 0); + uint64_t res = mulhu(a < 0 ? -a : a, b < 0 ? -b : b); + return negate ? 
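/* Sanity-check sketch for the portable mulhu() above (assumes a compiler
   that provides unsigned __int128, e.g. GCC or Clang on a 64-bit host):

   #include <stdint.h>

   static bool mulhu_matches_native(uint64_t a, uint64_t b)
   {
     return mulhu(a, b) == (uint64_t)(((unsigned __int128)a * b) >> 64);
   }
*/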
~res + (a * b == 0) : res; +} + +inline int64_t mulhsu(int64_t a, uint64_t b) +{ + int negate = a < 0; + uint64_t res = mulhu(a < 0 ? -a : a, b); + return negate ? ~res + (a * b == 0) : res; +} + +//ref: https://locklessinc.com/articles/sat_arithmetic/ +template +static inline T sat_add(T x, T y, bool &sat) +{ + UT ux = x; + UT uy = y; + UT res = ux + uy; + sat = false; + int sh = sizeof(T) * 8 - 1; + + /* Calculate overflowed result. (Don't change the sign bit of ux) */ + ux = (ux >> sh) + (((UT)0x1 << sh) - 1); + + /* Force compiler to use cmovns instruction */ + if ((T) ((ux ^ uy) | ~(uy ^ res)) >= 0) { + res = ux; + sat = true; + } + + return res; +} + +template +static inline T sat_add(T x, T y, T z, bool &sat) +{ + bool sat1, sat2; + T a = y; + T b = z; + T res; + + /* Force compiler to use cmovs instruction */ + if (((y ^ z) & (x ^ z)) < 0) { + a = z; + b = y; + } + + res = sat_add(x, a, sat1); + res = sat_add(res, b, sat2); + sat = sat1 || sat2; + + return res; +} + +template +static inline T sat_sub(T x, T y, bool &sat) +{ + UT ux = x; + UT uy = y; + UT res = ux - uy; + sat = false; + int sh = sizeof(T) * 8 - 1; + + /* Calculate overflowed result. (Don't change the sign bit of ux) */ + ux = (ux >> sh) + (((UT)0x1 << sh) - 1); + + /* Force compiler to use cmovns instruction */ + if ((T) ((ux ^ uy) & (ux ^ res)) < 0) { + res = ux; + sat = true; + } + + return res; +} + +template +T sat_addu(T x, T y, bool &sat) +{ + T res = x + y; + sat = false; + + sat = res < x; + res |= -(res < x); + + return res; +} + +template +T sat_subu(T x, T y, bool &sat) +{ + T res = x - y; + sat = false; + + sat = !(res <= x); + res &= -(res <= x); + + return res; +} + +static inline uint64_t extract64(uint64_t val, int pos, int len) +{ + assert(pos >= 0 && len > 0 && len <= 64 - pos); + return (val >> pos) & (~UINT64_C(0) >> (64 - len)); +} + +static inline uint64_t make_mask64(int pos, int len) +{ + assert(pos >= 0 && len > 0 && pos < 64 && len <= 64); + return (UINT64_MAX >> (64 - len)) << pos; +} + +static inline int popcount(uint64_t val) +{ + val = (val & 0x5555555555555555U) + ((val >> 1) & 0x5555555555555555U); + val = (val & 0x3333333333333333U) + ((val >> 2) & 0x3333333333333333U); + val = (val & 0x0f0f0f0f0f0f0f0fU) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fU); + val = (val & 0x00ff00ff00ff00ffU) + ((val >> 8) & 0x00ff00ff00ff00ffU); + val = (val & 0x0000ffff0000ffffU) + ((val >> 16) & 0x0000ffff0000ffffU); + val = (val & 0x00000000ffffffffU) + ((val >> 32) & 0x00000000ffffffffU); + return val; +} + +static inline int ctz(uint64_t val) +{ + if (!val) + return 0; + + int res = 0; + + if ((val << 32) == 0) res += 32, val >>= 32; + if ((val << 48) == 0) res += 16, val >>= 16; + if ((val << 56) == 0) res += 8, val >>= 8; + if ((val << 60) == 0) res += 4, val >>= 4; + if ((val << 62) == 0) res += 2, val >>= 2; + if ((val << 63) == 0) res += 1, val >>= 1; + + return res; +} + +static inline int clz(uint64_t val) +{ + if (!val) + return 0; + + int res = 0; + + if ((val >> 32) == 0) res += 32, val <<= 32; + if ((val >> 48) == 0) res += 16, val <<= 16; + if ((val >> 56) == 0) res += 8, val <<= 8; + if ((val >> 60) == 0) res += 4, val <<= 4; + if ((val >> 62) == 0) res += 2, val <<= 2; + if ((val >> 63) == 0) res += 1, val <<= 1; + + return res; +} + +static inline int log2(uint64_t val) +{ + if (!val) + return 0; + + return 63 - clz(val); +} + +static inline uint64_t xperm(uint64_t rs1, uint64_t rs2, size_t sz_log2, size_t len) +{ + uint64_t r = 0; + uint64_t sz = 1LL << sz_log2; + uint64_t mask = (1LL << sz) - 
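/* Usage sketch for the saturating helpers above (sat_add is templated on a
   signed type T and its unsigned counterpart UT, as the body's use of UT
   implies):

   bool sat = false;
   int8_t r = sat_add<int8_t, uint8_t>(100, 100, sat);
   // r == 127 (INT8_MAX) and sat == true: the mathematically correct 200
   // does not fit in int8_t, so the result clamps and the flag is raised.
*/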
1; + + assert(sz_log2 <= 6 && len <= 64); + + for (size_t i = 0; i < len; i += sz) { + uint64_t pos = ((rs2 >> i) & mask) << sz_log2; + if (pos < len) + r |= ((rs1 >> pos) & mask) << i; + } + + return r; +} + +#endif diff --git a/vendor/riscv-isa-sim/riscv/cachesim.cc b/vendor/riscv-isa-sim/riscv/cachesim.cc new file mode 100644 index 00000000..48840cb4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/cachesim.cc @@ -0,0 +1,210 @@ +// See LICENSE for license details. + +#include "cachesim.h" +#include "common.h" +#include +#include +#include + +cache_sim_t::cache_sim_t(size_t _sets, size_t _ways, size_t _linesz, const char* _name) +: sets(_sets), ways(_ways), linesz(_linesz), name(_name), log(false) +{ + init(); +} + +static void help() +{ + std::cerr << "Cache configurations must be of the form" << std::endl; + std::cerr << " sets:ways:blocksize" << std::endl; + std::cerr << "where sets, ways, and blocksize are positive integers, with" << std::endl; + std::cerr << "sets and blocksize both powers of two and blocksize at least 8." << std::endl; + exit(1); +} + +cache_sim_t* cache_sim_t::construct(const char* config, const char* name) +{ + const char* wp = strchr(config, ':'); + if (!wp++) help(); + const char* bp = strchr(wp, ':'); + if (!bp++) help(); + + size_t sets = atoi(std::string(config, wp).c_str()); + size_t ways = atoi(std::string(wp, bp).c_str()); + size_t linesz = atoi(bp); + + if (ways > 4 /* empirical */ && sets == 1) + return new fa_cache_sim_t(ways, linesz, name); + return new cache_sim_t(sets, ways, linesz, name); +} + +void cache_sim_t::init() +{ + if(sets == 0 || (sets & (sets-1))) + help(); + if(linesz < 8 || (linesz & (linesz-1))) + help(); + + idx_shift = 0; + for (size_t x = linesz; x>1; x >>= 1) + idx_shift++; + + tags = new uint64_t[sets*ways](); + read_accesses = 0; + read_misses = 0; + bytes_read = 0; + write_accesses = 0; + write_misses = 0; + bytes_written = 0; + writebacks = 0; + + miss_handler = NULL; +} + +cache_sim_t::cache_sim_t(const cache_sim_t& rhs) + : sets(rhs.sets), ways(rhs.ways), linesz(rhs.linesz), + idx_shift(rhs.idx_shift), name(rhs.name), log(false) +{ + tags = new uint64_t[sets*ways]; + memcpy(tags, rhs.tags, sets*ways*sizeof(uint64_t)); +} + +cache_sim_t::~cache_sim_t() +{ + print_stats(); + delete [] tags; +} + +void cache_sim_t::print_stats() +{ + if(read_accesses + write_accesses == 0) + return; + + float mr = 100.0f*(read_misses+write_misses)/(read_accesses+write_accesses); + + std::cout << std::setprecision(3) << std::fixed; + std::cout << name << " "; + std::cout << "Bytes Read: " << bytes_read << std::endl; + std::cout << name << " "; + std::cout << "Bytes Written: " << bytes_written << std::endl; + std::cout << name << " "; + std::cout << "Read Accesses: " << read_accesses << std::endl; + std::cout << name << " "; + std::cout << "Write Accesses: " << write_accesses << std::endl; + std::cout << name << " "; + std::cout << "Read Misses: " << read_misses << std::endl; + std::cout << name << " "; + std::cout << "Write Misses: " << write_misses << std::endl; + std::cout << name << " "; + std::cout << "Writebacks: " << writebacks << std::endl; + std::cout << name << " "; + std::cout << "Miss Rate: " << mr << '%' << std::endl; +} + +uint64_t* cache_sim_t::check_tag(uint64_t addr) +{ + size_t idx = (addr >> idx_shift) & (sets-1); + size_t tag = (addr >> idx_shift) | VALID; + + for (size_t i = 0; i < ways; i++) + if (tag == (tags[idx*ways + i] & ~DIRTY)) + return &tags[idx*ways + i]; + + return NULL; +} + +uint64_t cache_sim_t::victimize(uint64_t 
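/* Example configuration strings for construct() above; the format is
   "sets:ways:blocksize":

     cache_sim_t::construct("64:8:64", "D$");   // 64 sets x 8 ways x 64-byte lines = 32 KiB, set-associative
     cache_sim_t::construct("1:16:64", "L0");   // sets == 1 and ways > 4 => fa_cache_sim_t (fully associative)

   sets and blocksize must be powers of two and blocksize must be at least 8;
   otherwise help() is printed and the simulator exits. (The "L0" label is
   just an illustrative name for the stats printout.)
*/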
addr) +{ + size_t idx = (addr >> idx_shift) & (sets-1); + size_t way = lfsr.next() % ways; + uint64_t victim = tags[idx*ways + way]; + tags[idx*ways + way] = (addr >> idx_shift) | VALID; + return victim; +} + +void cache_sim_t::access(uint64_t addr, size_t bytes, bool store) +{ + store ? write_accesses++ : read_accesses++; + (store ? bytes_written : bytes_read) += bytes; + + uint64_t* hit_way = check_tag(addr); + if (likely(hit_way != NULL)) + { + if (store) + *hit_way |= DIRTY; + return; + } + + store ? write_misses++ : read_misses++; + if (log) + { + std::cerr << name << " " + << (store ? "write" : "read") << " miss 0x" + << std::hex << addr << std::endl; + } + + uint64_t victim = victimize(addr); + + if ((victim & (VALID | DIRTY)) == (VALID | DIRTY)) + { + uint64_t dirty_addr = (victim & ~(VALID | DIRTY)) << idx_shift; + if (miss_handler) + miss_handler->access(dirty_addr, linesz, true); + writebacks++; + } + + if (miss_handler) + miss_handler->access(addr & ~(linesz-1), linesz, false); + + if (store) + *check_tag(addr) |= DIRTY; +} + +void cache_sim_t::clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval) +{ + uint64_t start_addr = addr & ~(linesz-1); + uint64_t end_addr = (addr + bytes + linesz-1) & ~(linesz-1); + uint64_t cur_addr = start_addr; + while (cur_addr < end_addr) { + uint64_t* hit_way = check_tag(cur_addr); + if (likely(hit_way != NULL)) + { + if (clean) { + if (*hit_way & DIRTY) { + writebacks++; + *hit_way &= ~DIRTY; + } + } + + if (inval) + *hit_way &= ~VALID; + } + cur_addr += linesz; + } + if (miss_handler) + miss_handler->clean_invalidate(addr, bytes, clean, inval); +} + +fa_cache_sim_t::fa_cache_sim_t(size_t ways, size_t linesz, const char* name) + : cache_sim_t(1, ways, linesz, name) +{ +} + +uint64_t* fa_cache_sim_t::check_tag(uint64_t addr) +{ + auto it = tags.find(addr >> idx_shift); + return it == tags.end() ? NULL : &it->second; +} + +uint64_t fa_cache_sim_t::victimize(uint64_t addr) +{ + uint64_t old_tag = 0; + if (tags.size() == ways) + { + auto it = tags.begin(); + std::advance(it, lfsr.next() % ways); + old_tag = it->second; + tags.erase(it); + } + tags[addr >> idx_shift] = (addr >> idx_shift) | VALID; + return old_tag; +} diff --git a/vendor/riscv-isa-sim/riscv/cachesim.h b/vendor/riscv-isa-sim/riscv/cachesim.h new file mode 100644 index 00000000..b7f90143 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/cachesim.h @@ -0,0 +1,135 @@ +// See LICENSE for license details. 
+ +#ifndef _RISCV_CACHE_SIM_H +#define _RISCV_CACHE_SIM_H + +#include "memtracer.h" +#include +#include +#include +#include + +class lfsr_t +{ + public: + lfsr_t() : reg(1) {} + lfsr_t(const lfsr_t& lfsr) : reg(lfsr.reg) {} + uint32_t next() { return reg = (reg>>1)^(-(reg&1) & 0xd0000001); } + private: + uint32_t reg; +}; + +class cache_sim_t +{ + public: + cache_sim_t(size_t sets, size_t ways, size_t linesz, const char* name); + cache_sim_t(const cache_sim_t& rhs); + virtual ~cache_sim_t(); + + void access(uint64_t addr, size_t bytes, bool store); + void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval); + void print_stats(); + void set_miss_handler(cache_sim_t* mh) { miss_handler = mh; } + void set_log(bool _log) { log = _log; } + + static cache_sim_t* construct(const char* config, const char* name); + + protected: + static const uint64_t VALID = 1ULL << 63; + static const uint64_t DIRTY = 1ULL << 62; + + virtual uint64_t* check_tag(uint64_t addr); + virtual uint64_t victimize(uint64_t addr); + + lfsr_t lfsr; + cache_sim_t* miss_handler; + + size_t sets; + size_t ways; + size_t linesz; + size_t idx_shift; + + uint64_t* tags; + + uint64_t read_accesses; + uint64_t read_misses; + uint64_t bytes_read; + uint64_t write_accesses; + uint64_t write_misses; + uint64_t bytes_written; + uint64_t writebacks; + + std::string name; + bool log; + + void init(); +}; + +class fa_cache_sim_t : public cache_sim_t +{ + public: + fa_cache_sim_t(size_t ways, size_t linesz, const char* name); + uint64_t* check_tag(uint64_t addr); + uint64_t victimize(uint64_t addr); + private: + static bool cmp(uint64_t a, uint64_t b); + std::map tags; +}; + +class cache_memtracer_t : public memtracer_t +{ + public: + cache_memtracer_t(const char* config, const char* name) + { + cache = cache_sim_t::construct(config, name); + } + ~cache_memtracer_t() + { + delete cache; + } + void set_miss_handler(cache_sim_t* mh) + { + cache->set_miss_handler(mh); + } + void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval) + { + cache->clean_invalidate(addr, bytes, clean, inval); + } + void set_log(bool log) + { + cache->set_log(log); + } + + protected: + cache_sim_t* cache; +}; + +class icache_sim_t : public cache_memtracer_t +{ + public: + icache_sim_t(const char* config) : cache_memtracer_t(config, "I$") {} + bool interested_in_range(uint64_t begin, uint64_t end, access_type type) + { + return type == FETCH; + } + void trace(uint64_t addr, size_t bytes, access_type type) + { + if (type == FETCH) cache->access(addr, bytes, false); + } +}; + +class dcache_sim_t : public cache_memtracer_t +{ + public: + dcache_sim_t(const char* config) : cache_memtracer_t(config, "D$") {} + bool interested_in_range(uint64_t begin, uint64_t end, access_type type) + { + return type == LOAD || type == STORE; + } + void trace(uint64_t addr, size_t bytes, access_type type) + { + if (type == LOAD || type == STORE) cache->access(addr, bytes, type == STORE); + } +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/cfg.h b/vendor/riscv-isa-sim/riscv/cfg.h new file mode 100644 index 00000000..6369bd84 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/cfg.h @@ -0,0 +1,88 @@ +// See LICENSE for license details. 
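/* The replacement policy in cache_sim_t::victimize() above is pseudo-random:
   the way is chosen as lfsr.next() % ways. A quick sketch of the generator
   defined in cachesim.h (a 32-bit Galois LFSR seeded with 1, tap mask
   0xd0000001):

   lfsr_t lfsr;
   lfsr.next();   // 0xd0000001
   lfsr.next();   // 0xb8000001
   lfsr.next();   // 0x8c000001
   // ... whenever the low bit is 1, the shifted register is XORed with the
   // tap mask, giving a deterministic but well-scrambled sequence, which is
   // all the cache model needs for victim selection.
*/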
+#ifndef _RISCV_CFG_H +#define _RISCV_CFG_H + +#include +#include "decode.h" +#include "mmu.h" +#include + +template +class cfg_arg_t { +public: + cfg_arg_t(T default_val) + : value(default_val), was_set(false) {} + + bool overridden() const { return was_set; } + + T operator()() const { return value; } + + T operator=(const T v) { + value = v; + was_set = true; + return value; + } + +private: + T value; + bool was_set; +}; + +// Configuration that describes a memory region +class mem_cfg_t +{ +public: + mem_cfg_t(reg_t base, reg_t size) + : base(base), size(size) + { + // The truth of these assertions should be ensured by whatever is creating + // the regions in the first place, but we have them here to make sure that + // we can't end up describing memory regions that don't make sense. They + // ask that the page size is a multiple of the minimum page size, that the + // page is aligned to the minimum page size, that the page is non-empty and + // that the top address is still representable in a reg_t. + assert((size % PGSIZE == 0) && + (base % PGSIZE == 0) && + (base + size > base)); + } + + reg_t base; + reg_t size; +}; + +class cfg_t +{ +public: + cfg_t(std::pair default_initrd_bounds, + const char *default_bootargs, + const char *default_isa, const char *default_priv, + const char *default_varch, + const std::vector &default_mem_layout, + const std::vector default_hartids, + bool default_real_time_clint) + : initrd_bounds(default_initrd_bounds), + bootargs(default_bootargs), + isa(default_isa), + priv(default_priv), + varch(default_varch), + mem_layout(default_mem_layout), + hartids(default_hartids), + explicit_hartids(false), + real_time_clint(default_real_time_clint) + {} + + cfg_arg_t> initrd_bounds; + cfg_arg_t bootargs; + cfg_arg_t isa; + cfg_arg_t priv; + cfg_arg_t varch; + cfg_arg_t> mem_layout; + std::optional start_pc; + cfg_arg_t> hartids; + bool explicit_hartids; + cfg_arg_t real_time_clint; + + size_t nprocs() const { return hartids().size(); } +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/clint.cc b/vendor/riscv-isa-sim/riscv/clint.cc new file mode 100644 index 00000000..72d1bbeb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/clint.cc @@ -0,0 +1,89 @@ +#include +#include "devices.h" +#include "processor.h" + +clint_t::clint_t(std::vector& procs, uint64_t freq_hz, bool real_time) + : procs(procs), freq_hz(freq_hz), real_time(real_time), mtime(0), mtimecmp(procs.size()) +{ + struct timeval base; + + gettimeofday(&base, NULL); + + real_time_ref_secs = base.tv_sec; + real_time_ref_usecs = base.tv_usec; +} + +/* 0000 msip hart 0 + * 0004 msip hart 1 + * 4000 mtimecmp hart 0 lo + * 4004 mtimecmp hart 0 hi + * 4008 mtimecmp hart 1 lo + * 400c mtimecmp hart 1 hi + * bff8 mtime lo + * bffc mtime hi + */ + +#define MSIP_BASE 0x0 +#define MTIMECMP_BASE 0x4000 +#define MTIME_BASE 0xbff8 + +bool clint_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + increment(0); + if (addr >= MSIP_BASE && addr + len <= MSIP_BASE + procs.size()*sizeof(msip_t)) { + std::vector msip(procs.size()); + for (size_t i = 0; i < procs.size(); ++i) + msip[i] = !!(procs[i]->state.mip->read() & MIP_MSIP); + memcpy(bytes, (uint8_t*)&msip[0] + addr - MSIP_BASE, len); + } else if (addr >= MTIMECMP_BASE && addr + len <= MTIMECMP_BASE + procs.size()*sizeof(mtimecmp_t)) { + memcpy(bytes, (uint8_t*)&mtimecmp[0] + addr - MTIMECMP_BASE, len); + } else if (addr >= MTIME_BASE && addr + len <= MTIME_BASE + sizeof(mtime_t)) { + memcpy(bytes, (uint8_t*)&mtime + addr - MTIME_BASE, len); + } else { + return false; + 
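/* Worked example for the register map documented in the comment block above
   (offsets are relative to the CLINT base, 0x2000000 in the common platform
   layout used elsewhere in this tree):

     msip for hart i     : 0x0000 + 4*i   (e.g. hart 1 -> 0x0004)
     mtimecmp for hart i : 0x4000 + 8*i   (e.g. hart 1 -> 0x4008 lo / 0x400c hi)
     mtime               : 0xbff8 (lo) / 0xbffc (hi)

   So storing the 32-bit value 1 at offset 0x0004 raises MIP_MSIP on hart 1,
   and storing 0 there drops it again, exactly as store() below implements.
*/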
} + return true; +} + +bool clint_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + if (addr >= MSIP_BASE && addr + len <= MSIP_BASE + procs.size()*sizeof(msip_t)) { + std::vector msip(procs.size()); + std::vector mask(procs.size(), 0); + memcpy((uint8_t*)&msip[0] + addr - MSIP_BASE, bytes, len); + memset((uint8_t*)&mask[0] + addr - MSIP_BASE, 0xff, len); + for (size_t i = 0; i < procs.size(); ++i) { + if (!(mask[i] & 0xFF)) continue; + procs[i]->state.mip->backdoor_write_with_mask(MIP_MSIP, 0); + if (!!(msip[i] & 1)) + procs[i]->state.mip->backdoor_write_with_mask(MIP_MSIP, MIP_MSIP); + } + } else if (addr >= MTIMECMP_BASE && addr + len <= MTIMECMP_BASE + procs.size()*sizeof(mtimecmp_t)) { + memcpy((uint8_t*)&mtimecmp[0] + addr - MTIMECMP_BASE, bytes, len); + } else if (addr >= MTIME_BASE && addr + len <= MTIME_BASE + sizeof(mtime_t)) { + memcpy((uint8_t*)&mtime + addr - MTIME_BASE, bytes, len); + } else { + return false; + } + increment(0); + return true; +} + +void clint_t::increment(reg_t inc) +{ + if (real_time) { + struct timeval now; + uint64_t diff_usecs; + + gettimeofday(&now, NULL); + diff_usecs = ((now.tv_sec - real_time_ref_secs) * 1000000) + (now.tv_usec - real_time_ref_usecs); + mtime = diff_usecs * freq_hz / 1000000; + } else { + mtime += inc; + } + for (size_t i = 0; i < procs.size(); i++) { + procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, 0); + if (mtime >= mtimecmp[i]) + procs[i]->state.mip->backdoor_write_with_mask(MIP_MTIP, MIP_MTIP); + } +} diff --git a/vendor/riscv-isa-sim/riscv/common.h b/vendor/riscv-isa-sim/riscv/common.h new file mode 100644 index 00000000..002a83f0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/common.h @@ -0,0 +1,18 @@ +// See LICENSE for license details. + +#ifndef _RISCV_COMMON_H +#define _RISCV_COMMON_H + +#ifdef __GNUC__ +# define likely(x) __builtin_expect(x, 1) +# define unlikely(x) __builtin_expect(x, 0) +# define NOINLINE __attribute__ ((noinline)) +# define NORETURN __attribute__ ((noreturn)) +#else +# define likely(x) (x) +# define unlikely(x) (x) +# define NOINLINE +# define NORETURN +#endif + +#endif diff --git a/vendor/riscv-isa-sim/riscv/csrs.cc b/vendor/riscv-isa-sim/riscv/csrs.cc new file mode 100644 index 00000000..f31022fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/csrs.cc @@ -0,0 +1,1297 @@ +// See LICENSE for license details. + +#include "csrs.h" +// For processor_t: +#include "processor.h" +#include "mmu.h" +// For get_field(): +#include "decode.h" +// For trap_virtual_instruction and trap_illegal_instruction: +#include "trap.h" +// For require(): +#include "insn_macros.h" + +// STATE macro used by require_privilege() macro: +#undef STATE +#define STATE (*state) + + +// implement class csr_t +csr_t::csr_t(processor_t* const proc, const reg_t addr): + proc(proc), + state(proc->get_state()), + address(addr), + csr_priv(get_field(addr, 0x300)), + csr_read_only(get_field(addr, 0xC00) == 3) { +} + +void csr_t::verify_permissions(insn_t insn, bool write) const { + // Check permissions. Raise virtual-instruction exception if V=1, + // privileges are insufficient, and the CSR belongs to supervisor or + // hypervisor. Raise illegal-instruction exception otherwise. + unsigned priv = state->prv == PRV_S && !state->v ? 
PRV_HS : state->prv; + + if ((csr_priv == PRV_S && !proc->extension_enabled('S')) || + (csr_priv == PRV_HS && !proc->extension_enabled('H'))) + throw trap_illegal_instruction(insn.bits()); + + if (write && csr_read_only) + throw trap_illegal_instruction(insn.bits()); + if (priv < csr_priv) { + if (state->v && csr_priv <= PRV_HS) + throw trap_virtual_instruction(insn.bits()); + throw trap_illegal_instruction(insn.bits()); + } +} + + +csr_t::~csr_t() { +} + +void csr_t::write(const reg_t val) noexcept { + const bool success = unlogged_write(val); + if (success) { + log_write(); + } +} + +void csr_t::log_write() const noexcept { + log_special_write(address, written_value()); +} + +void csr_t::log_special_write(const reg_t address, const reg_t val) const noexcept { +#if defined(RISCV_ENABLE_COMMITLOG) + proc->get_state()->log_reg_write[((address) << 4) | 4] = {val, 0}; +#endif +} + +reg_t csr_t::written_value() const noexcept { + return read(); +} + +// implement class basic_csr_t +basic_csr_t::basic_csr_t(processor_t* const proc, const reg_t addr, const reg_t init): + csr_t(proc, addr), + val(init) { +} + +bool basic_csr_t::unlogged_write(const reg_t val) noexcept { + this->val = val; + return true; +} + + +// implement class pmpaddr_csr_t +pmpaddr_csr_t::pmpaddr_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0), + cfg(0), + pmpidx(address - CSR_PMPADDR0) { +} + + +void pmpaddr_csr_t::verify_permissions(insn_t insn, bool write) const { + csr_t::verify_permissions(insn, write); + // If n_pmp is zero, that means pmp is not implemented hence raise + // trap if it tries to access the csr. I would prefer to implement + // this by not instantiating any pmpaddr_csr_t for these regs, but + // n_pmp can change after reset() is run. + if (proc->n_pmp == 0) + throw trap_illegal_instruction(insn.bits()); +} + + +reg_t pmpaddr_csr_t::read() const noexcept { + if ((cfg & PMP_A) >= PMP_NAPOT) + return val | (~proc->pmp_tor_mask() >> 1); + return val & proc->pmp_tor_mask(); +} + + +bool pmpaddr_csr_t::unlogged_write(const reg_t val) noexcept { + // If no PMPs are configured, disallow access to all. Otherwise, + // allow access to all, but unimplemented ones are hardwired to + // zero. Note that n_pmp can change after reset(); otherwise I would + // implement this in state_t::reset() by instantiating the correct + // number of pmpaddr_csr_t. 
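/* Worked examples for the address decoding in csr_t's constructor above
   (csr_priv = bits [9:8], csr_read_only = bits [11:10] == 3, following the
   standard RISC-V CSR address convention):

     mstatus (0x300): bits[9:8] = 3 -> machine-level,    bits[11:10] = 0 -> writable
     sstatus (0x100): bits[9:8] = 1 -> supervisor-level, writable
     cycle   (0xC00): bits[9:8] = 0 -> user-level,       bits[11:10] = 3 -> read-only

   so any write (e.g. CSRRW) to 0xC00 raises an illegal-instruction trap in
   verify_permissions(), regardless of the current privilege mode.
*/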
+ if (proc->n_pmp == 0) + return false; + + bool lock_bypass = state->mseccfg->get_rlb(); + bool locked = (cfg & PMP_L) && !lock_bypass; + + if (pmpidx < proc->n_pmp && !locked && !next_locked_and_tor()) { + this->val = val & ((reg_t(1) << (MAX_PADDR_BITS - PMP_SHIFT)) - 1); + } + else + return false; + proc->get_mmu()->flush_tlb(); + return true; +} + +bool pmpaddr_csr_t::next_locked_and_tor() const noexcept { + if (pmpidx+1 >= state->max_pmp) return false; // this is the last entry + bool lock_bypass = state->mseccfg->get_rlb(); + bool next_locked = (state->pmpaddr[pmpidx+1]->cfg & PMP_L) && !lock_bypass; + bool next_tor = (state->pmpaddr[pmpidx+1]->cfg & PMP_A) == PMP_TOR; + return next_locked && next_tor; +} + + +reg_t pmpaddr_csr_t::tor_paddr() const noexcept { + return (val & proc->pmp_tor_mask()) << PMP_SHIFT; +} + + +reg_t pmpaddr_csr_t::tor_base_paddr() const noexcept { + if (pmpidx == 0) return 0; // entry 0 always uses 0 as base + return state->pmpaddr[pmpidx-1]->tor_paddr(); +} + + +reg_t pmpaddr_csr_t::napot_mask() const noexcept { + bool is_na4 = (cfg & PMP_A) == PMP_NA4; + reg_t mask = (val << 1) | (!is_na4) | ~proc->pmp_tor_mask(); + return ~(mask & ~(mask + 1)) << PMP_SHIFT; +} + + +bool pmpaddr_csr_t::match4(reg_t addr) const noexcept { + if ((cfg & PMP_A) == 0) return false; + bool is_tor = (cfg & PMP_A) == PMP_TOR; + if (is_tor) return tor_base_paddr() <= addr && addr < tor_paddr(); + // NAPOT or NA4: + return ((addr ^ tor_paddr()) & napot_mask()) == 0; +} + + +bool pmpaddr_csr_t::subset_match(reg_t addr, reg_t len) const noexcept { + if ((addr | len) & (len - 1)) + abort(); + reg_t base = tor_base_paddr(); + reg_t tor = tor_paddr(); + + if ((cfg & PMP_A) == 0) return false; + + bool is_tor = (cfg & PMP_A) == PMP_TOR; + bool begins_after_lower = addr >= base; + bool begins_after_upper = addr >= tor; + bool ends_before_lower = (addr & -len) < (base & -len); + bool ends_before_upper = (addr & -len) < (tor & -len); + bool tor_homogeneous = ends_before_lower || begins_after_upper || + (begins_after_lower && ends_before_upper); + + bool mask_homogeneous = ~(napot_mask() << 1) & len; + bool napot_homogeneous = mask_homogeneous || ((addr ^ tor) / len) != 0; + + return !(is_tor ? tor_homogeneous : napot_homogeneous); +} + + +bool pmpaddr_csr_t::access_ok(access_type type, reg_t mode) const noexcept { + bool cfgx = cfg & PMP_X; + bool cfgw = cfg & PMP_W; + bool cfgr = cfg & PMP_R; + bool cfgl = cfg & PMP_L; + + bool prvm = mode == PRV_M; + + bool typer = type == LOAD; + bool typex = type == FETCH; + bool typew = type == STORE; + bool normal_rwx = (typer && cfgr) || (typew && cfgw) || (typex && cfgx); + bool mseccfg_mml = state->mseccfg->get_mml(); + + if (mseccfg_mml) { + if (cfgx && cfgw && cfgr && cfgl) { + // Locked Shared data region: Read only on both M and S/U mode. + return typer; + } else { + bool mml_shared_region = !cfgr && cfgw; + bool mml_chk_normal = (prvm == cfgl) && normal_rwx; + bool mml_chk_shared = + (!cfgl && cfgx && (typer || typew)) || + (!cfgl && !cfgx && (typer || (typew && prvm))) || + (cfgl && typex) || + (cfgl && typer && cfgx && prvm); + return mml_shared_region ? 
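/* Worked example for the NAPOT matching implemented by napot_mask()/match4()
   above: pmpaddr holds the physical address shifted right by PMP_SHIFT (2),
   and a NAPOT region of 2^n bytes is encoded by setting the low n-3 bits.
   To cover the 4 KiB page at 0x80000000:

     pmpaddr = (0x80000000 >> 2) | ((0x1000 >> 3) - 1);   // = 0x200001ff
     pmpcfg  = PMP_NAPOT | PMP_R | PMP_W;                  // A = NAPOT, read/write

   With that value napot_mask() evaluates to ~0xfff, so match4() accepts
   exactly the addresses 0x80000000 .. 0x80000fff.
*/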
mml_chk_shared : mml_chk_normal; + } + } else { + bool m_bypass = (prvm && !cfgl); + return m_bypass || normal_rwx; + } +} + + +// implement class pmpcfg_csr_t +pmpcfg_csr_t::pmpcfg_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +reg_t pmpcfg_csr_t::read() const noexcept { + reg_t cfg_res = 0; + for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8 && i < state->max_pmp; i++) + cfg_res |= reg_t(state->pmpaddr[i]->cfg) << (8 * (i - i0)); + return cfg_res; +} + +bool pmpcfg_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->n_pmp == 0) + return false; + + bool write_success = false; + bool rlb = state->mseccfg->get_rlb(); + bool mml = state->mseccfg->get_mml(); + for (size_t i0 = (address - CSR_PMPCFG0) * 4, i = i0; i < i0 + proc->get_xlen() / 8; i++) { + if (i < proc->n_pmp) { + bool locked = (state->pmpaddr[i]->cfg & PMP_L); + bool next_locked = (i+1 < proc->n_pmp) && (state->pmpaddr[i+1]->cfg & PMP_L); + bool next_tor = (i+1 < proc->n_pmp) && (state->pmpaddr[i+1]->cfg & PMP_A) == PMP_TOR; + + if (rlb || (!locked && !(next_locked && next_tor))) { + uint8_t cfg = (val >> (8 * (i - i0))) & (PMP_R | PMP_W | PMP_X | PMP_A | PMP_L); + // Drop R=0 W=1 when MML = 0 + // Remove the restriction when MML = 1 + if (!mml) { + cfg &= ~PMP_W | ((cfg & PMP_R) ? PMP_W : 0); + } + // Disallow A=NA4 when granularity > 4 + if (proc->lg_pmp_granularity != PMP_SHIFT && (cfg & PMP_A) == PMP_NA4) + cfg |= PMP_NAPOT; + /* + Adding a rule with executable privileges that either is M-mode-only or a locked Shared-Region is not possible + and such pmpcfg writes are ignored, leaving pmpcfg unchanged. + This restriction can be temporarily lifted e.g. during the boot process, by setting mseccfg.RLB. + */ + if (rlb || !(mml && ((cfg & PMP_L) && ((cfg & PMP_X) || ((cfg & PMP_W) && !(cfg & PMP_R)))))) + state->pmpaddr[i]->cfg = cfg; + } + + write_success = true; + + state->mseccfg->pmplock_recorded &= ~(1ULL << i); + if (state->pmpaddr[i]->cfg & PMP_L) { + state->mseccfg->pmplock_recorded |= (1ULL << i); + } + } + } + proc->get_mmu()->flush_tlb(); + return write_success; +} + +// implement class mseccfg_csr_t +mseccfg_csr_t::mseccfg_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + mseccfg_val(0), + pmplock_recorded(0) { +} + +bool mseccfg_csr_t::get_mml() const noexcept { + return (mseccfg_val & MSECCFG_MML); +} + +bool mseccfg_csr_t::get_mmwp() const noexcept { + return (mseccfg_val & MSECCFG_MMWP); +} + +bool mseccfg_csr_t::get_rlb() const noexcept { + return (mseccfg_val & MSECCFG_RLB); +} + +reg_t mseccfg_csr_t::read() const noexcept { + return mseccfg_val; +} + +bool mseccfg_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->n_pmp == 0) + return false; + + //When mseccfg.RLB is 0 and pmpcfg.L is 1 in any rule or entry (including disabled entries) + if (!(pmplock_recorded && (mseccfg_val & MSECCFG_RLB)==0)) { + mseccfg_val &= ~MSECCFG_RLB; + mseccfg_val |= (val & MSECCFG_RLB); + } + + mseccfg_val |= (val & MSECCFG_MMWP); //MMWP is sticky + mseccfg_val |= (val & MSECCFG_MML); //MML is sticky + + proc->get_mmu()->flush_tlb(); + + return true; +} + +// implement class virtualized_csr_t +virtualized_csr_t::virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt): + csr_t(proc, orig->address), + orig_csr(orig), + virt_csr(virt) { +} + + +reg_t virtualized_csr_t::read() const noexcept { + return readvirt(state->v); +} + +reg_t virtualized_csr_t::readvirt(bool virt) const noexcept { + return virt ? 
virt_csr->read() : orig_csr->read(); +} + +bool virtualized_csr_t::unlogged_write(const reg_t val) noexcept { + if (state->v) + virt_csr->write(val); + else + orig_csr->write(val); + return false; // virt_csr or orig_csr has already logged +} + + +// implement class epc_csr_t +epc_csr_t::epc_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + + +reg_t epc_csr_t::read() const noexcept { + return val & proc->pc_alignment_mask(); +} + + +bool epc_csr_t::unlogged_write(const reg_t val) noexcept { + this->val = val & ~(reg_t)1; + return true; +} + + +// implement class tvec_csr_t +tvec_csr_t::tvec_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + + +reg_t tvec_csr_t::read() const noexcept { + return val; +} + + +bool tvec_csr_t::unlogged_write(const reg_t val) noexcept { + this->val = val & ~(reg_t)2; + return true; +} + + +// implement class cause_csr_t +cause_csr_t::cause_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + + +reg_t cause_csr_t::read() const noexcept { + reg_t val = basic_csr_t::read(); + // When reading, the interrupt bit needs to adjust to xlen. Spike does + // not generally support dynamic xlen, but this code was (partly) + // there since at least 2015 (ea58df8 and c4350ef). + if (proc->get_isa().get_max_xlen() > proc->get_xlen()) // Move interrupt bit to top of xlen + return val | ((val >> (proc->get_isa().get_max_xlen()-1)) << (proc->get_xlen()-1)); + return val; +} + + +// implement class base_status_csr_t +base_status_csr_t::base_status_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + has_page(proc->extension_enabled_const('S') && proc->supports_impl(IMPL_MMU)), + sstatus_write_mask(compute_sstatus_write_mask()), + sstatus_read_mask(sstatus_write_mask | SSTATUS_UBE | SSTATUS_UXL + | (proc->get_const_xlen() == 32 ? SSTATUS32_SD : SSTATUS64_SD)) { +} + + +reg_t base_status_csr_t::compute_sstatus_write_mask() const noexcept { + // If a configuration has FS bits, they will always be accessible no + // matter the state of misa. + const bool has_fs = proc->extension_enabled('S') || proc->extension_enabled('F') + || proc->extension_enabled('V'); + const bool has_vs = proc->extension_enabled('V'); + return 0 + | (proc->extension_enabled('S') ? (SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_SPP) : 0) + | (has_page ? (SSTATUS_SUM | SSTATUS_MXR) : 0) + | (has_fs ? SSTATUS_FS : 0) + | (proc->any_custom_extensions() ? SSTATUS_XS : 0) + | (has_vs ? SSTATUS_VS : 0) + ; +} + + +reg_t base_status_csr_t::adjust_sd(const reg_t val) const noexcept { + // This uses get_const_xlen() instead of get_xlen() not only because + // the variable is static, so it's only called once, but also + // because the SD bit moves when XLEN changes, which means we would + // need to call adjust_sd() on every read, instead of on every + // write. + static const reg_t sd_bit = proc->get_const_xlen() == 64 ? SSTATUS64_SD : SSTATUS32_SD; + if (((val & SSTATUS_FS) == SSTATUS_FS) || + ((val & SSTATUS_VS) == SSTATUS_VS) || + ((val & SSTATUS_XS) == SSTATUS_XS)) { + return val | sd_bit; + } + return val & ~sd_bit; +} + + +void base_status_csr_t::maybe_flush_tlb(const reg_t newval) noexcept { + if ((newval ^ read()) & + (MSTATUS_MPP | MSTATUS_MPRV + | (has_page ? 
(MSTATUS_MXR | MSTATUS_SUM) : 0) + )) + proc->get_mmu()->flush_tlb(); +} + + +namespace { + int xlen_to_uxl(int xlen) { + if (xlen == 32) + return 1; + if (xlen == 64) + return 2; + abort(); + } +} + + +// implement class vsstatus_csr_t +vsstatus_csr_t::vsstatus_csr_t(processor_t* const proc, const reg_t addr): + base_status_csr_t(proc, addr), + val(proc->get_state()->mstatus->read() & sstatus_read_mask) { +} + +bool vsstatus_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t newval = (this->val & ~sstatus_write_mask) | (val & sstatus_write_mask); + if (state->v) maybe_flush_tlb(newval); + this->val = adjust_sd(newval); + return true; +} + + +// implement class sstatus_proxy_csr_t +sstatus_proxy_csr_t::sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus): + base_status_csr_t(proc, addr), + mstatus(mstatus) { +} + +bool sstatus_proxy_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t new_mstatus = (mstatus->read() & ~sstatus_write_mask) | (val & sstatus_write_mask); + + mstatus->write(new_mstatus); + return false; // avoid double logging: already logged by mstatus->write() +} + + +// implement class mstatus_csr_t +mstatus_csr_t::mstatus_csr_t(processor_t* const proc, const reg_t addr): + base_status_csr_t(proc, addr), + val(0 + | (proc->extension_enabled_const('U') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_UXL, xlen_to_uxl(proc->get_const_xlen())) : 0) + | (proc->extension_enabled_const('S') && (proc->get_const_xlen() != 32) ? set_field((reg_t)0, MSTATUS_SXL, xlen_to_uxl(proc->get_const_xlen())) : 0) + +#ifdef RISCV_ENABLE_DUAL_ENDIAN + | (proc->get_mmu()->is_target_big_endian() ? MSTATUS_UBE | MSTATUS_SBE | MSTATUS_MBE : 0) +#endif + | 0 // initial value for mstatus + ) { +} + + +bool mstatus_csr_t::unlogged_write(const reg_t val) noexcept { + const bool has_mpv = proc->extension_enabled('S') && proc->extension_enabled('H'); + const bool has_gva = has_mpv; + + const reg_t mask = sstatus_write_mask + | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_MPRV + | MSTATUS_MPP | MSTATUS_TW + | (proc->extension_enabled('S') ? MSTATUS_TSR : 0) + | (has_page ? MSTATUS_TVM : 0) + | (has_gva ? MSTATUS_GVA : 0) + | (has_mpv ? MSTATUS_MPV : 0); + + const reg_t requested_mpp = proc->legalize_privilege(get_field(val, MSTATUS_MPP)); + const reg_t adjusted_val = set_field(val, MSTATUS_MPP, requested_mpp); + const reg_t new_mstatus = (read() & ~mask) | (adjusted_val & mask); + maybe_flush_tlb(new_mstatus); + this->val = adjust_sd(new_mstatus); + return true; +} + +// implement class mstatush_csr_t +mstatush_csr_t::mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus): + csr_t(proc, addr), + mstatus(mstatus), + mask(MSTATUSH_MPV | MSTATUSH_GVA | MSTATUSH_SBE | MSTATUSH_MBE) { +} + +reg_t mstatush_csr_t::read() const noexcept { + return (mstatus->read() >> 32) & mask; +} + +bool mstatush_csr_t::unlogged_write(const reg_t val) noexcept { + return mstatus->unlogged_write((mstatus->written_value() & ~(mask << 32)) | ((val & mask) << 32)); +} + +// implement class sstatus_csr_t +sstatus_csr_t::sstatus_csr_t(processor_t* const proc, sstatus_proxy_csr_t_p orig, vsstatus_csr_t_p virt): + virtualized_csr_t(proc, orig, virt), + orig_sstatus(orig), + virt_sstatus(virt) { +} + +void sstatus_csr_t::dirty(const reg_t dirties) { + // As an optimization, return early if already dirty. 
+ if ((orig_sstatus->read() & dirties) == dirties) { + if (likely(!state->v || (virt_sstatus->read() & dirties) == dirties)) + return; + } + + // Catch problems like #823 where P-extension instructions were not + // checking for mstatus.VS!=Off: + if (!enabled(dirties)) abort(); + + orig_sstatus->write(orig_sstatus->read() | dirties); + if (state->v) { + virt_sstatus->write(virt_sstatus->read() | dirties); + } +} + +bool sstatus_csr_t::enabled(const reg_t which) { + if ((orig_sstatus->read() & which) != 0) { + if (!state->v || (virt_sstatus->read() & which) != 0) + return true; + } + + // If the field doesn't exist, it is always enabled. See #823. + if (!orig_sstatus->field_exists(which)) + return true; + + return false; +} + + +// implement class misa_csr_t +misa_csr_t::misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa): + basic_csr_t(proc, addr, max_isa), + max_isa(max_isa), + write_mask(max_isa & (0 // allow MAFDQCHV bits in MISA to be modified + | (1L << ('M' - 'A')) + | (1L << ('A' - 'A')) + | (1L << ('F' - 'A')) + | (1L << ('D' - 'A')) + | (1L << ('Q' - 'A')) + | (1L << ('C' - 'A')) + | (1L << ('H' - 'A')) + | (1L << ('V' - 'A')) + ) + ) { +} + +const reg_t misa_csr_t::dependency(const reg_t val, const char feature, const char depends_on) const noexcept { + return (val & (1L << (depends_on - 'A'))) ? val : (val & ~(1L << (feature - 'A'))); +} + +bool misa_csr_t::unlogged_write(const reg_t val) noexcept { + // the write is ignored if increasing IALIGN would misalign the PC + if (!(val & (1L << ('C' - 'A'))) && (state->pc & 2)) + return false; + + reg_t adjusted_val = val; + adjusted_val = dependency(adjusted_val, 'D', 'F'); + adjusted_val = dependency(adjusted_val, 'Q', 'D'); + adjusted_val = dependency(adjusted_val, 'V', 'D'); + + const reg_t old_misa = read(); + const bool prev_h = old_misa & (1L << ('H' - 'A')); + const reg_t new_misa = (adjusted_val & write_mask) | (old_misa & ~write_mask); + const bool new_h = new_misa & (1L << ('H' - 'A')); + + // update the hypervisor-only bits in MEDELEG and other CSRs + if (!new_h && prev_h) { + reg_t hypervisor_exceptions = 0 + | (1 << CAUSE_VIRTUAL_SUPERVISOR_ECALL) + | (1 << CAUSE_FETCH_GUEST_PAGE_FAULT) + | (1 << CAUSE_LOAD_GUEST_PAGE_FAULT) + | (1 << CAUSE_VIRTUAL_INSTRUCTION) + | (1 << CAUSE_STORE_GUEST_PAGE_FAULT) + ; + state->medeleg->write(state->medeleg->read() & ~hypervisor_exceptions); + state->mstatus->write(state->mstatus->read() & ~(MSTATUS_GVA | MSTATUS_MPV)); + state->mie->write_with_mask(MIP_HS_MASK, 0); // also takes care of hie, sie + state->mip->write_with_mask(MIP_HS_MASK, 0); // also takes care of hip, sip, hvip + state->hstatus->write(0); + } + + return basic_csr_t::unlogged_write(new_misa); +} + +bool misa_csr_t::extension_enabled_const(unsigned char ext) const noexcept { + assert(!(1 & (write_mask >> (ext - 'A')))); + return extension_enabled(ext); +} + + +// implement class mip_or_mie_csr_t +mip_or_mie_csr_t::mip_or_mie_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + +reg_t mip_or_mie_csr_t::read() const noexcept { + return val; +} + +void mip_or_mie_csr_t::write_with_mask(const reg_t mask, const reg_t val) noexcept { + this->val = (this->val & ~mask) | (val & mask); + log_write(); +} + +bool mip_or_mie_csr_t::unlogged_write(const reg_t val) noexcept { + write_with_mask(write_mask(), val); + return false; // avoid double logging: already logged by write_with_mask() +} + + +mip_csr_t::mip_csr_t(processor_t* const proc, const reg_t addr): + 
mip_or_mie_csr_t(proc, addr) { +} + +void mip_csr_t::backdoor_write_with_mask(const reg_t mask, const reg_t val) noexcept { + this->val = (this->val & ~mask) | (val & mask); +} + +reg_t mip_csr_t::write_mask() const noexcept { + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t vssip_int = proc->extension_enabled('H') ? MIP_VSSIP : 0; + const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0; + // We must mask off sgeip, vstip, and vseip. All three of these + // bits are aliases for the same bits in hip. The hip spec says: + // * sgeip is read-only -- write hgeip instead + // * vseip is read-only -- write hvip instead + // * vstip is read-only -- write hvip instead + return (supervisor_ints | hypervisor_ints) & + (MIP_SEIP | MIP_SSIP | MIP_STIP | vssip_int); +} + + +mie_csr_t::mie_csr_t(processor_t* const proc, const reg_t addr): + mip_or_mie_csr_t(proc, addr) { +} + + +reg_t mie_csr_t::write_mask() const noexcept { + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t hypervisor_ints = proc->extension_enabled('H') ? MIP_HS_MASK : 0; + const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP; + const reg_t delegable_ints = supervisor_ints | coprocessor_ints; + const reg_t all_ints = delegable_ints | hypervisor_ints | MIP_MSIP | MIP_MTIP | MIP_MEIP; + return all_ints; +} + + +// implement class generic_int_accessor_t +generic_int_accessor_t::generic_int_accessor_t(state_t* const state, + const reg_t read_mask, + const reg_t ip_write_mask, + const reg_t ie_write_mask, + const mask_mode_t mask_mode, + const int shiftamt): + state(state), + read_mask(read_mask), + ip_write_mask(ip_write_mask), + ie_write_mask(ie_write_mask), + mask_mideleg(mask_mode == MIDELEG), + mask_hideleg(mask_mode == HIDELEG), + shiftamt(shiftamt) { +} + +reg_t generic_int_accessor_t::ip_read() const noexcept { + return (state->mip->read() & deleg_mask() & read_mask) >> shiftamt; +} + +void generic_int_accessor_t::ip_write(const reg_t val) noexcept { + const reg_t mask = deleg_mask() & ip_write_mask; + state->mip->write_with_mask(mask, val << shiftamt); +} + +reg_t generic_int_accessor_t::ie_read() const noexcept { + return (state->mie->read() & deleg_mask() & read_mask) >> shiftamt; +} + +void generic_int_accessor_t::ie_write(const reg_t val) noexcept { + const reg_t mask = deleg_mask() & ie_write_mask; + state->mie->write_with_mask(mask, val << shiftamt); +} + +reg_t generic_int_accessor_t::deleg_mask() const { + const reg_t hideleg_mask = mask_hideleg ? state->hideleg->read() : (reg_t)~0; + const reg_t mideleg_mask = mask_mideleg ? 
state->mideleg->read() : (reg_t)~0; + return hideleg_mask & mideleg_mask; +} + + +// implement class mip_proxy_csr_t +mip_proxy_csr_t::mip_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr): + csr_t(proc, addr), + accr(accr) { +} + +reg_t mip_proxy_csr_t::read() const noexcept { + return accr->ip_read(); +} + +bool mip_proxy_csr_t::unlogged_write(const reg_t val) noexcept { + accr->ip_write(val); + return false; // accr has already logged +} + +// implement class mie_proxy_csr_t +mie_proxy_csr_t::mie_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr): + csr_t(proc, addr), + accr(accr) { +} + +reg_t mie_proxy_csr_t::read() const noexcept { + return accr->ie_read(); +} + +bool mie_proxy_csr_t::unlogged_write(const reg_t val) noexcept { + accr->ie_write(val); + return false; // accr has already logged +} + + +// implement class mideleg_csr_t +mideleg_csr_t::mideleg_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +reg_t mideleg_csr_t::read() const noexcept { + reg_t val = basic_csr_t::read(); + if (proc->extension_enabled('H')) return val | MIDELEG_FORCED_MASK; + // No need to clear MIDELEG_FORCED_MASK because those bits can never + // get set in val. + return val; +} + +void mideleg_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!proc->extension_enabled('S')) + throw trap_illegal_instruction(insn.bits()); +} + +bool mideleg_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t supervisor_ints = proc->extension_enabled('S') ? MIP_SSIP | MIP_STIP | MIP_SEIP : 0; + const reg_t coprocessor_ints = (reg_t)proc->any_custom_extensions() << IRQ_COP; + const reg_t delegable_ints = supervisor_ints | coprocessor_ints; + + return basic_csr_t::unlogged_write(val & delegable_ints); +} + + +// implement class medeleg_csr_t +medeleg_csr_t::medeleg_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0), + hypervisor_exceptions(0 + | (1 << CAUSE_VIRTUAL_SUPERVISOR_ECALL) + | (1 << CAUSE_FETCH_GUEST_PAGE_FAULT) + | (1 << CAUSE_LOAD_GUEST_PAGE_FAULT) + | (1 << CAUSE_VIRTUAL_INSTRUCTION) + | (1 << CAUSE_STORE_GUEST_PAGE_FAULT) + ) { +} + +void medeleg_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!proc->extension_enabled('S')) + throw trap_illegal_instruction(insn.bits()); +} + +bool medeleg_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t mask = 0 + | (1 << CAUSE_MISALIGNED_FETCH) + | (1 << CAUSE_BREAKPOINT) + | (1 << CAUSE_USER_ECALL) + | (1 << CAUSE_SUPERVISOR_ECALL) + | (1 << CAUSE_FETCH_PAGE_FAULT) + | (1 << CAUSE_LOAD_PAGE_FAULT) + | (1 << CAUSE_STORE_PAGE_FAULT) + | (proc->extension_enabled('H') ? hypervisor_exceptions : 0) + ; + return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); +} + + +// implement class masked_csr_t +masked_csr_t::masked_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): + basic_csr_t(proc, addr, init), + mask(mask) { +} + +bool masked_csr_t::unlogged_write(const reg_t val) noexcept { + return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); +} + + +// implement class base_atp_csr_t and family +base_atp_csr_t::base_atp_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + + +bool base_atp_csr_t::unlogged_write(const reg_t val) noexcept { + const reg_t newval = proc->supports_impl(IMPL_MMU) ? 
compute_new_satp(val) : 0; + if (newval != read()) + proc->get_mmu()->flush_tlb(); + return basic_csr_t::unlogged_write(newval); +} + +bool base_atp_csr_t::satp_valid(reg_t val) const noexcept { + if (proc->get_xlen() == 32) { + switch (get_field(val, SATP32_MODE)) { + case SATP_MODE_SV32: return proc->supports_impl(IMPL_MMU_SV32); + case SATP_MODE_OFF: return true; + default: return false; + } + } else { + switch (get_field(val, SATP64_MODE)) { + case SATP_MODE_SV39: return proc->supports_impl(IMPL_MMU_SV39); + case SATP_MODE_SV48: return proc->supports_impl(IMPL_MMU_SV48); + case SATP_MODE_SV57: return proc->supports_impl(IMPL_MMU_SV57); + case SATP_MODE_OFF: return true; + default: return false; + } + } +} + +reg_t base_atp_csr_t::compute_new_satp(reg_t val) const noexcept { + reg_t rv64_ppn_mask = (reg_t(1) << (MAX_PADDR_BITS - PGSHIFT)) - 1; + + reg_t mode_mask = proc->get_xlen() == 32 ? SATP32_MODE : SATP64_MODE; + reg_t asid_mask_if_enabled = proc->get_xlen() == 32 ? SATP32_ASID : SATP64_ASID; + reg_t asid_mask = proc->supports_impl(IMPL_MMU_ASID) ? asid_mask_if_enabled : 0; + reg_t ppn_mask = proc->get_xlen() == 32 ? SATP32_PPN : SATP64_PPN & rv64_ppn_mask; + reg_t new_mask = (satp_valid(val) ? mode_mask : 0) | asid_mask | ppn_mask; + reg_t old_mask = satp_valid(val) ? 0 : mode_mask; + + return (new_mask & val) | (old_mask & read()); +} + +satp_csr_t::satp_csr_t(processor_t* const proc, const reg_t addr): + base_atp_csr_t(proc, addr) { +} + +void satp_csr_t::verify_permissions(insn_t insn, bool write) const { + base_atp_csr_t::verify_permissions(insn, write); + if (get_field(state->mstatus->read(), MSTATUS_TVM)) + require(state->prv >= PRV_M); +} + +virtualized_satp_csr_t::virtualized_satp_csr_t(processor_t* const proc, satp_csr_t_p orig, csr_t_p virt): + virtualized_csr_t(proc, orig, virt), + orig_satp(orig) { +} + +void virtualized_satp_csr_t::verify_permissions(insn_t insn, bool write) const { + virtualized_csr_t::verify_permissions(insn, write); + + // If satp is accessed from VS mode, it's really accessing vsatp, + // and the hstatus.VTVM bit controls. + if (state->v) { + if (get_field(state->hstatus->read(), HSTATUS_VTVM)) + throw trap_virtual_instruction(insn.bits()); + } + else { + orig_csr->verify_permissions(insn, write); + } +} + +bool virtualized_satp_csr_t::unlogged_write(const reg_t val) noexcept { + // If unsupported Mode field: no change to contents + const reg_t newval = orig_satp->satp_valid(val) ? val : read(); + return virtualized_csr_t::unlogged_write(newval); +} + + +// implement class wide_counter_csr_t +wide_counter_csr_t::wide_counter_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + val(0) { +} + +reg_t wide_counter_csr_t::read() const noexcept { + return val; +} + +void wide_counter_csr_t::bump(const reg_t howmuch) noexcept { + val += howmuch; // to keep log reasonable size, don't log every bump +} + +bool wide_counter_csr_t::unlogged_write(const reg_t val) noexcept { + if (proc->get_xlen() == 32) + this->val = (this->val >> 32 << 32) | (val & 0xffffffffU); + else + this->val = val; + // The ISA mandates that if an instruction writes instret, the write + // takes precedence over the increment to instret. However, Spike + // unconditionally increments instret after executing an instruction. + // Correct for this artifact by decrementing instret here. 
+ this->val--; + return true; +} + +reg_t wide_counter_csr_t::written_value() const noexcept { + // Re-adjust for upcoming bump() + return this->val + 1; +} + +void wide_counter_csr_t::write_upper_half(const reg_t val) noexcept { + this->val = (val << 32) | (this->val << 32 >> 32); + this->val--; // See comment above. + // Log upper half only. + log_special_write(address + (CSR_MINSTRETH - CSR_MINSTRET), written_value() >> 32); +} + + +counter_top_csr_t::counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent): + csr_t(proc, addr), + parent(parent) { +} + +reg_t counter_top_csr_t::read() const noexcept { + return parent->read() >> 32; +} + +bool counter_top_csr_t::unlogged_write(const reg_t val) noexcept { + parent->write_upper_half(val); + return true; +} + + +proxy_csr_t::proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): + csr_t(proc, addr), + delegate(delegate) { +} + +reg_t proxy_csr_t::read() const noexcept { + return delegate->read(); +} + +bool proxy_csr_t::unlogged_write(const reg_t val) noexcept { + delegate->write(val); // log only under the original (delegate's) name + return false; +} + + +const_csr_t::const_csr_t(processor_t* const proc, const reg_t addr, reg_t val): + csr_t(proc, addr), + val(val) { +} + +reg_t const_csr_t::read() const noexcept { + return val; +} + +bool const_csr_t::unlogged_write(const reg_t val) noexcept { + return false; +} + + +counter_proxy_csr_t::counter_proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate): + proxy_csr_t(proc, addr, delegate) { +} + +bool counter_proxy_csr_t::myenable(csr_t_p counteren) const noexcept { + return 1 & (counteren->read() >> (address & 31)); +} + +void counter_proxy_csr_t::verify_permissions(insn_t insn, bool write) const { + const bool mctr_ok = (state->prv < PRV_M) ? myenable(state->mcounteren) : true; + const bool hctr_ok = state->v ? myenable(state->hcounteren) : true; + const bool sctr_ok = (proc->extension_enabled('S') && state->prv < PRV_S) ? myenable(state->scounteren) : true; + + if (write || !mctr_ok) + throw trap_illegal_instruction(insn.bits()); + if (!hctr_ok) + throw trap_virtual_instruction(insn.bits()); + if (!sctr_ok) { + if (state->v) + throw trap_virtual_instruction(insn.bits()); + else + throw trap_illegal_instruction(insn.bits()); + } +} + + +hypervisor_csr_t::hypervisor_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +void hypervisor_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!proc->extension_enabled('H')) + throw trap_illegal_instruction(insn.bits()); +} + + +hideleg_csr_t::hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg): + masked_csr_t(proc, addr, MIP_VS_MASK, 0), + mideleg(mideleg) { +} + +reg_t hideleg_csr_t::read() const noexcept { + return masked_csr_t::read() & mideleg->read(); +}; + + +hgatp_csr_t::hgatp_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +void hgatp_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!state->v && get_field(state->mstatus->read(), MSTATUS_TVM)) + require_privilege(PRV_M); +} + +bool hgatp_csr_t::unlogged_write(const reg_t val) noexcept { + proc->get_mmu()->flush_tlb(); + + reg_t mask; + if (proc->get_const_xlen() == 32) { + mask = HGATP32_PPN | + HGATP32_MODE | + proc->supports_impl(IMPL_MMU_VMID) ? 
HGATP32_VMID : 0; + } else { + mask = (HGATP64_PPN & ((reg_t(1) << (MAX_PADDR_BITS - PGSHIFT)) - 1)) | + (proc->supports_impl(IMPL_MMU_VMID) ? HGATP64_VMID : 0); + + if (get_field(val, HGATP64_MODE) == HGATP_MODE_OFF || + (proc->supports_impl(IMPL_MMU_SV39) && get_field(val, HGATP64_MODE) == HGATP_MODE_SV39X4) || + (proc->supports_impl(IMPL_MMU_SV48) && get_field(val, HGATP64_MODE) == HGATP_MODE_SV48X4) || + (proc->supports_impl(IMPL_MMU_SV57) && get_field(val, HGATP64_MODE) == HGATP_MODE_SV57X4)) + mask |= HGATP64_MODE; + } + mask &= ~(reg_t)3; + return basic_csr_t::unlogged_write((read() & ~mask) | (val & mask)); +} + + +tselect_csr_t::tselect_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +bool tselect_csr_t::unlogged_write(const reg_t val) noexcept { + return basic_csr_t::unlogged_write((val < proc->TM.count()) ? val : read()); +} + + +tdata1_csr_t::tdata1_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +reg_t tdata1_csr_t::read() const noexcept { + return proc->TM.tdata1_read(proc, state->tselect->read()); +} + +bool tdata1_csr_t::unlogged_write(const reg_t val) noexcept { + return proc->TM.tdata1_write(proc, state->tselect->read(), val); +} + + +tdata2_csr_t::tdata2_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +reg_t tdata2_csr_t::read() const noexcept { + return proc->TM.tdata2_read(proc, state->tselect->read()); +} + +bool tdata2_csr_t::unlogged_write(const reg_t val) noexcept { + return proc->TM.tdata2_write(proc, state->tselect->read(), val); +} + + +debug_mode_csr_t::debug_mode_csr_t(processor_t* const proc, const reg_t addr): + basic_csr_t(proc, addr, 0) { +} + +void debug_mode_csr_t::verify_permissions(insn_t insn, bool write) const { + basic_csr_t::verify_permissions(insn, write); + if (!state->debug_mode) + throw trap_illegal_instruction(insn.bits()); +} + +dpc_csr_t::dpc_csr_t(processor_t* const proc, const reg_t addr): + epc_csr_t(proc, addr) { +} + +void dpc_csr_t::verify_permissions(insn_t insn, bool write) const { + epc_csr_t::verify_permissions(insn, write); + if (!state->debug_mode) + throw trap_illegal_instruction(insn.bits()); +} + + +dcsr_csr_t::dcsr_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr), + prv(0), + step(false), + ebreakm(false), + ebreakh(false), + ebreaks(false), + ebreaku(false), + halt(false), + cause(0) { +} + +void dcsr_csr_t::verify_permissions(insn_t insn, bool write) const { + csr_t::verify_permissions(insn, write); + if (!state->debug_mode) + throw trap_illegal_instruction(insn.bits()); +} + +reg_t dcsr_csr_t::read() const noexcept { + uint32_t v = 0; + v = set_field(v, DCSR_XDEBUGVER, 1); + v = set_field(v, DCSR_EBREAKM, ebreakm); + v = set_field(v, DCSR_EBREAKH, ebreakh); + v = set_field(v, DCSR_EBREAKS, ebreaks); + v = set_field(v, DCSR_EBREAKU, ebreaku); + v = set_field(v, DCSR_STOPCYCLE, 0); + v = set_field(v, DCSR_STOPTIME, 0); + v = set_field(v, DCSR_CAUSE, cause); + v = set_field(v, DCSR_STEP, step); + v = set_field(v, DCSR_PRV, prv); + return v; +} + +bool dcsr_csr_t::unlogged_write(const reg_t val) noexcept { + prv = get_field(val, DCSR_PRV); + step = get_field(val, DCSR_STEP); + // TODO: ndreset and fullreset + ebreakm = get_field(val, DCSR_EBREAKM); + ebreakh = get_field(val, DCSR_EBREAKH); + ebreaks = get_field(val, DCSR_EBREAKS); + ebreaku = get_field(val, DCSR_EBREAKU); + halt = get_field(val, DCSR_HALT); + return true; +} + +void dcsr_csr_t::write_cause_and_prv(uint8_t cause, reg_t prv) noexcept { + 
this->cause = cause; + this->prv = prv; + log_write(); +} + + +float_csr_t::float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): + masked_csr_t(proc, addr, mask, init) { +} + +void float_csr_t::verify_permissions(insn_t insn, bool write) const { + masked_csr_t::verify_permissions(insn, write); + require_fp; + if (!proc->extension_enabled('F')) + throw trap_illegal_instruction(insn.bits()); +} + +bool float_csr_t::unlogged_write(const reg_t val) noexcept { + dirty_fp_state; + return masked_csr_t::unlogged_write(val); +} + + +composite_csr_t::composite_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb): + csr_t(proc, addr), + upper_csr(upper_csr), + lower_csr(lower_csr), + upper_lsb(upper_lsb) { +} + +void composite_csr_t::verify_permissions(insn_t insn, bool write) const { + // It is reasonable to assume that either underlying CSR will have + // the same permissions as this composite. + upper_csr->verify_permissions(insn, write); +} + +reg_t composite_csr_t::read() const noexcept { + return (upper_csr->read() << upper_lsb) | lower_csr->read(); +} + +bool composite_csr_t::unlogged_write(const reg_t val) noexcept { + upper_csr->write(val >> upper_lsb); + lower_csr->write(val); + return false; // logging is done only by the underlying CSRs +} + + +seed_csr_t::seed_csr_t(processor_t* const proc, const reg_t addr): + csr_t(proc, addr) { +} + +void seed_csr_t::verify_permissions(insn_t insn, bool write) const { + /* Read-only access disallowed due to wipe-on-read side effect */ + /* XXX mseccfg.sseed and mseccfg.useed should be verified. */ + if (!proc->extension_enabled(EXT_ZKR) || !write) + throw trap_illegal_instruction(insn.bits()); + csr_t::verify_permissions(insn, write); +} + +reg_t seed_csr_t::read() const noexcept { + return proc->es.get_seed(); +} + +bool seed_csr_t::unlogged_write(const reg_t val) noexcept { + proc->es.set_seed(val); + return true; +} + + + +vector_csr_t::vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init): + basic_csr_t(proc, addr, init), + mask(mask) { +} + +void vector_csr_t::verify_permissions(insn_t insn, bool write) const { + require_vector_vs; + if (!proc->extension_enabled('V')) + throw trap_illegal_instruction(insn.bits()); + basic_csr_t::verify_permissions(insn, write); +} + +void vector_csr_t::write_raw(const reg_t val) noexcept { + const bool success = basic_csr_t::unlogged_write(val); + if (success) + log_write(); +} + +bool vector_csr_t::unlogged_write(const reg_t val) noexcept { + if (mask == 0) return false; + dirty_vs_state; + return basic_csr_t::unlogged_write(val & mask); +} + + +vxsat_csr_t::vxsat_csr_t(processor_t* const proc, const reg_t addr): + masked_csr_t(proc, addr, /*mask*/ 1, /*init*/ 0) { +} + +void vxsat_csr_t::verify_permissions(insn_t insn, bool write) const { + require_vector_vs; + if (!proc->extension_enabled('V') && !proc->extension_enabled(EXT_ZPN)) + throw trap_illegal_instruction(insn.bits()); + masked_csr_t::verify_permissions(insn, write); +} + +bool vxsat_csr_t::unlogged_write(const reg_t val) noexcept { + dirty_vs_state; + return masked_csr_t::unlogged_write(val); +} diff --git a/vendor/riscv-isa-sim/riscv/csrs.h b/vendor/riscv-isa-sim/riscv/csrs.h new file mode 100644 index 00000000..ed039555 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/csrs.h @@ -0,0 +1,701 @@ +// See LICENSE for license details. 
+#ifndef _RISCV_CSRS_H +#define _RISCV_CSRS_H + +// For reg_t: +#include "decode.h" +// For std::shared_ptr +#include +// For access_type: +#include "memtracer.h" +#include + +class processor_t; +struct state_t; + +// Parent, abstract class for all CSRs +class csr_t { + public: + csr_t(processor_t* const proc, const reg_t addr); + + // Throw exception if read/write disallowed. + virtual void verify_permissions(insn_t insn, bool write) const; + + // read() returns the architectural value of this CSR. No permission + // checking needed or allowed. Side effects not allowed. + virtual reg_t read() const noexcept = 0; + + // write() updates the architectural value of this CSR. No + // permission checking needed or allowed. + // Child classes must implement unlogged_write() + void write(const reg_t val) noexcept; + + virtual ~csr_t(); + + protected: + // Return value indicates success; false means no write actually occurred + virtual bool unlogged_write(const reg_t val) noexcept = 0; + + // Record this CSR update (which has already happened) in the commit log + void log_write() const noexcept; + + // Record a write to an alternate CSR (e.g. minstreth instead of minstret) + void log_special_write(const reg_t address, const reg_t val) const noexcept; + + // What value was written to this reg? Default implementation simply + // calls read(), but a few CSRs are special. + virtual reg_t written_value() const noexcept; + + processor_t* const proc; + state_t* const state; + public: + const reg_t address; + private: + const unsigned csr_priv; + const bool csr_read_only; +}; + +typedef std::shared_ptr csr_t_p; + + +// Basic CSRs, with XLEN bits fully readable and writable. +class basic_csr_t: public csr_t { + public: + basic_csr_t(processor_t* const proc, const reg_t addr, const reg_t init); + + virtual reg_t read() const noexcept override { + return val; + } + + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t val; +}; + + +class pmpaddr_csr_t: public csr_t { + public: + pmpaddr_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + virtual reg_t read() const noexcept override; + + // Does a 4-byte access at the specified address match this PMP entry? + bool match4(reg_t addr) const noexcept; + + // Does the specified range match only a proper subset of this page? + bool subset_match(reg_t addr, reg_t len) const noexcept; + + // Is the specified access allowed given the pmpcfg privileges? + bool access_ok(access_type type, reg_t mode) const noexcept; + + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + // Assuming this is configured as TOR, return address for top of + // range. Also forms bottom-of-range for next-highest pmpaddr + // register if that one is TOR. + reg_t tor_paddr() const noexcept; + + // Assuming this is configured as TOR, return address for bottom of + // range. This is tor_paddr() from the previous pmpaddr register. + reg_t tor_base_paddr() const noexcept; + + // Assuming this is configured as NAPOT or NA4, return mask for paddr. + // E.g. for 4KiB region, returns 0xffffffff_fffff000. 
+ reg_t napot_mask() const noexcept; + + bool next_locked_and_tor() const noexcept; + reg_t val; + friend class pmpcfg_csr_t; // so he can access cfg + uint8_t cfg; + const size_t pmpidx; +}; + +typedef std::shared_ptr pmpaddr_csr_t_p; + +class pmpcfg_csr_t: public csr_t { + public: + pmpcfg_csr_t(processor_t* const proc, const reg_t addr); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + +class mseccfg_csr_t: public csr_t { + public: + mseccfg_csr_t(processor_t* const proc, const reg_t addr); + virtual reg_t read() const noexcept override; + bool get_mml() const noexcept; + bool get_mmwp() const noexcept; + bool get_rlb() const noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t mseccfg_val; + reg_t pmplock_recorded; + friend class pmpcfg_csr_t; //pmpcfg needs to access pmplock_recorded + //friend class pmpaddr_csr_t; +}; + +typedef std::shared_ptr mseccfg_csr_t_p; + +// For CSRs that have a virtualized copy under another name. Each +// instance of virtualized_csr_t will read/write one of two CSRs, +// based on state.v. E.g. sscratch, stval, etc. +// +// Example: sscratch and vsscratch are both instances of basic_csr_t. +// The csrmap will contain a virtualized_csr_t under sscratch's +// address, plus the vsscratch basic_csr_t under its address. + +class virtualized_csr_t: public csr_t { + public: + virtualized_csr_t(processor_t* const proc, csr_t_p orig, csr_t_p virt); + + virtual reg_t read() const noexcept override; + // Instead of using state.v, explicitly request original or virtual: + reg_t readvirt(bool virt) const noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + csr_t_p orig_csr; + csr_t_p virt_csr; +}; + +typedef std::shared_ptr virtualized_csr_t_p; + +// For mepc, sepc, and vsepc +class epc_csr_t: public csr_t { + public: + epc_csr_t(processor_t* const proc, const reg_t addr); + + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t val; +}; + + +// For mtvec, stvec, and vstvec +class tvec_csr_t: public csr_t { + public: + tvec_csr_t(processor_t* const proc, const reg_t addr); + + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t val; +}; + + +// For mcause, scause, and vscause +class cause_csr_t: public basic_csr_t { + public: + cause_csr_t(processor_t* const proc, const reg_t addr); + + virtual reg_t read() const noexcept override; +}; + + +// For *status family of CSRs +class base_status_csr_t: public csr_t { + public: + base_status_csr_t(processor_t* const proc, const reg_t addr); + + bool field_exists(const reg_t which) { + return (sstatus_write_mask & which) != 0; + } + + protected: + reg_t adjust_sd(const reg_t val) const noexcept; + void maybe_flush_tlb(const reg_t newval) noexcept; + const bool has_page; + const reg_t sstatus_write_mask; + const reg_t sstatus_read_mask; + private: + reg_t compute_sstatus_write_mask() const noexcept; +}; + +typedef std::shared_ptr base_status_csr_t_p; + + +// For vsstatus, which is its own separate architectural register +// (unlike sstatus) +class vsstatus_csr_t final: public base_status_csr_t { + public: + vsstatus_csr_t(processor_t* const proc, const reg_t addr); + + reg_t read() const noexcept override { + return val; + } + + protected: + virtual bool unlogged_write(const 
reg_t val) noexcept override; + private: + reg_t val; +}; + +typedef std::shared_ptr vsstatus_csr_t_p; + + +class mstatus_csr_t final: public base_status_csr_t { + public: + mstatus_csr_t(processor_t* const proc, const reg_t addr); + + reg_t read() const noexcept override { + return val; + } + + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t val; + friend class mstatush_csr_t; +}; + +typedef std::shared_ptr mstatus_csr_t_p; + + +class mstatush_csr_t: public csr_t { + public: + mstatush_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + mstatus_csr_t_p mstatus; + const reg_t mask; +}; + + +class sstatus_proxy_csr_t final: public base_status_csr_t { + public: + sstatus_proxy_csr_t(processor_t* const proc, const reg_t addr, mstatus_csr_t_p mstatus); + + reg_t read() const noexcept override { + return mstatus->read() & sstatus_read_mask; + } + + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + mstatus_csr_t_p mstatus; +}; + +typedef std::shared_ptr sstatus_proxy_csr_t_p; + +class sstatus_csr_t: public virtualized_csr_t { + public: + sstatus_csr_t(processor_t* const proc, sstatus_proxy_csr_t_p orig, vsstatus_csr_t_p virt); + + // Set FS, VS, or XS bits to dirty + void dirty(const reg_t dirties); + // Return true if the specified bits are not 00 (Off) + bool enabled(const reg_t which); + private: + sstatus_proxy_csr_t_p orig_sstatus; + vsstatus_csr_t_p virt_sstatus; +}; + +typedef std::shared_ptr sstatus_csr_t_p; + + +class misa_csr_t final: public basic_csr_t { + public: + misa_csr_t(processor_t* const proc, const reg_t addr, const reg_t max_isa); + + bool extension_enabled(unsigned char ext) const noexcept { + assert(ext >= 'A' && ext <= 'Z'); + return (read() >> (ext - 'A')) & 1; + } + + bool extension_enabled_const(unsigned char ext) const noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + const reg_t max_isa; + const reg_t write_mask; + const reg_t dependency(const reg_t val, const char feature, const char depends_on) const noexcept; +}; + +typedef std::shared_ptr misa_csr_t_p; + + +class mip_or_mie_csr_t: public csr_t { + public: + mip_or_mie_csr_t(processor_t* const proc, const reg_t addr); + virtual reg_t read() const noexcept override final; + + void write_with_mask(const reg_t mask, const reg_t val) noexcept; + + protected: + virtual bool unlogged_write(const reg_t val) noexcept override final; + reg_t val; + private: + virtual reg_t write_mask() const noexcept = 0; +}; + + +// mip is special because some of the bits are driven by hardware pins +class mip_csr_t: public mip_or_mie_csr_t { + public: + mip_csr_t(processor_t* const proc, const reg_t addr); + + // Does not log. Used by external things (clint) that wiggle bits in mip. + void backdoor_write_with_mask(const reg_t mask, const reg_t val) noexcept; + private: + virtual reg_t write_mask() const noexcept override; +}; + +typedef std::shared_ptr mip_csr_t_p; + + +class mie_csr_t: public mip_or_mie_csr_t { + public: + mie_csr_t(processor_t* const proc, const reg_t addr); + private: + virtual reg_t write_mask() const noexcept override; +}; + +typedef std::shared_ptr mie_csr_t_p; + + +// For sip, hip, hvip, vsip, sie, hie, vsie which are all just (masked +// & shifted) views into mip or mie. 
Each pair will have one of these +// objects describing the view, e.g. one for sip+sie, one for hip+hie, +// etc. +class generic_int_accessor_t { + public: + enum mask_mode_t { NONE, MIDELEG, HIDELEG }; + + generic_int_accessor_t(state_t* const state, + const reg_t read_mask, + const reg_t ip_write_mask, + const reg_t ie_write_mask, + const mask_mode_t mask_mode, + const int shiftamt); + reg_t ip_read() const noexcept; + void ip_write(const reg_t val) noexcept; + reg_t ie_read() const noexcept; + void ie_write(const reg_t val) noexcept; + private: + state_t* const state; + const reg_t read_mask; + const reg_t ip_write_mask; + const reg_t ie_write_mask; + const bool mask_mideleg; + const bool mask_hideleg; + const int shiftamt; + reg_t deleg_mask() const; +}; + +typedef std::shared_ptr generic_int_accessor_t_p; + + +// For all CSRs that are simply (masked & shifted) views into mip +class mip_proxy_csr_t: public csr_t { + public: + mip_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + generic_int_accessor_t_p accr; +}; + +// For all CSRs that are simply (masked & shifted) views into mie +class mie_proxy_csr_t: public csr_t { + public: + mie_proxy_csr_t(processor_t* const proc, const reg_t addr, generic_int_accessor_t_p accr); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + generic_int_accessor_t_p accr; +}; + + + +class mideleg_csr_t: public basic_csr_t { + public: + mideleg_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + + +class medeleg_csr_t: public basic_csr_t { + public: + medeleg_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + const reg_t hypervisor_exceptions; +}; + + +// For CSRs with certain bits hardwired +class masked_csr_t: public basic_csr_t { + public: + masked_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init); + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + const reg_t mask; +}; + + +// For satp and vsatp +// These are three classes in order to handle the [V]TVM bits permission checks +class base_atp_csr_t: public basic_csr_t { + public: + base_atp_csr_t(processor_t* const proc, const reg_t addr); + bool satp_valid(reg_t val) const noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t compute_new_satp(reg_t val) const noexcept; +}; + +class satp_csr_t: public base_atp_csr_t { + public: + satp_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; +}; + +typedef std::shared_ptr satp_csr_t_p; + +class virtualized_satp_csr_t: public virtualized_csr_t { + public: + virtualized_satp_csr_t(processor_t* const proc, satp_csr_t_p orig, csr_t_p virt); + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + satp_csr_t_p orig_satp; +}; + + +// 
For minstret and mcycle, which are always 64 bits, but in RV32 are +// split into high and low halves. The first class always holds the +// full 64-bit value. +class wide_counter_csr_t: public csr_t { + public: + wide_counter_csr_t(processor_t* const proc, const reg_t addr); + // Always returns full 64-bit value + virtual reg_t read() const noexcept override; + void bump(const reg_t howmuch) noexcept; + void write_upper_half(const reg_t val) noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + virtual reg_t written_value() const noexcept override; + private: + reg_t val; +}; + +typedef std::shared_ptr wide_counter_csr_t_p; + + +// A simple proxy to read/write the upper half of minstret/mcycle +class counter_top_csr_t: public csr_t { + public: + counter_top_csr_t(processor_t* const proc, const reg_t addr, wide_counter_csr_t_p parent); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + wide_counter_csr_t_p parent; +}; + +typedef std::shared_ptr counter_top_csr_t_p; + + +// For a CSR that is an alias of another +class proxy_csr_t: public csr_t { + public: + proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate); + virtual reg_t read() const noexcept override; + protected: + bool unlogged_write(const reg_t val) noexcept override; + private: + csr_t_p delegate; +}; + + +// For a CSR with a fixed, unchanging value +class const_csr_t: public csr_t { + public: + const_csr_t(processor_t* const proc, const reg_t addr, reg_t val); + virtual reg_t read() const noexcept override; + protected: + bool unlogged_write(const reg_t val) noexcept override; + private: + const reg_t val; +}; + + +// For a CSR that is an unprivileged accessor of a privileged counter +class counter_proxy_csr_t: public proxy_csr_t { + public: + counter_proxy_csr_t(processor_t* const proc, const reg_t addr, csr_t_p delegate); + virtual void verify_permissions(insn_t insn, bool write) const override; + private: + bool myenable(csr_t_p counteren) const noexcept; +}; + + +// For machine-level CSRs that only exist with Hypervisor +class hypervisor_csr_t: public basic_csr_t { + public: + hypervisor_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; +}; + + +class hideleg_csr_t: public masked_csr_t { + public: + hideleg_csr_t(processor_t* const proc, const reg_t addr, csr_t_p mideleg); + virtual reg_t read() const noexcept override; + private: + csr_t_p mideleg; +}; + + +class hgatp_csr_t: public basic_csr_t { + public: + hgatp_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + + +class tselect_csr_t: public basic_csr_t { + public: + tselect_csr_t(processor_t* const proc, const reg_t addr); + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + + +class tdata1_csr_t: public csr_t { + public: + tdata1_csr_t(processor_t* const proc, const reg_t addr); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + +class tdata2_csr_t: public csr_t { + public: + tdata2_csr_t(processor_t* const proc, const reg_t addr); + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + +// For CSRs that are 
only writable from debug mode +class debug_mode_csr_t: public basic_csr_t { + public: + debug_mode_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; +}; + +typedef std::shared_ptr tdata2_csr_t_p; + + +class dpc_csr_t: public epc_csr_t { + public: + dpc_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; +}; + +class dcsr_csr_t: public csr_t { + public: + dcsr_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + virtual reg_t read() const noexcept override; + void write_cause_and_prv(uint8_t cause, reg_t prv) noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + public: + uint8_t prv; + bool step; + bool ebreakm; + bool ebreakh; + bool ebreaks; + bool ebreaku; + bool halt; + uint8_t cause; +}; + +typedef std::shared_ptr dcsr_csr_t_p; + + +class float_csr_t final: public masked_csr_t { + public: + float_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init); + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + +typedef std::shared_ptr float_csr_t_p; + + +// For a CSR like FCSR, that is actually a view into multiple +// underlying registers. +class composite_csr_t: public csr_t { + public: + // We assume the lower_csr maps to bit 0. + composite_csr_t(processor_t* const proc, const reg_t addr, csr_t_p upper_csr, csr_t_p lower_csr, const unsigned upper_lsb); + virtual void verify_permissions(insn_t insn, bool write) const override; + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + csr_t_p upper_csr; + csr_t_p lower_csr; + const unsigned upper_lsb; +}; + + +class seed_csr_t: public csr_t { + public: + seed_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + virtual reg_t read() const noexcept override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + + +class vector_csr_t: public basic_csr_t { + public: + vector_csr_t(processor_t* const proc, const reg_t addr, const reg_t mask, const reg_t init=0); + virtual void verify_permissions(insn_t insn, bool write) const override; + // Write without regard to mask, and without touching mstatus.VS + void write_raw(const reg_t val) noexcept; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; + private: + reg_t mask; +}; + +typedef std::shared_ptr vector_csr_t_p; + + +// For CSRs shared between Vector and P extensions (vxsat) +class vxsat_csr_t: public masked_csr_t { + public: + vxsat_csr_t(processor_t* const proc, const reg_t addr); + virtual void verify_permissions(insn_t insn, bool write) const override; + protected: + virtual bool unlogged_write(const reg_t val) noexcept override; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/debug_defines.h b/vendor/riscv-isa-sim/riscv/debug_defines.h new file mode 100644 index 00000000..9ce54b72 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/debug_defines.h @@ -0,0 +1,2538 @@ +/* + * This file is auto-generated by running 'make debug_defines.h' in + * https://github.com/riscv/riscv-debug-spec/ (4ce69ad) + * License: Creative Commons Attribution 4.0 International Public License (CC BY 4.0) + */ + 
+#define DTM_IDCODE 0x01 +/* + * Identifies the release version of this part. + */ +#define DTM_IDCODE_VERSION_OFFSET 28 +#define DTM_IDCODE_VERSION_LENGTH 4 +#define DTM_IDCODE_VERSION (0xfU << DTM_IDCODE_VERSION_OFFSET) +/* + * Identifies the designer's part number of this part. + */ +#define DTM_IDCODE_PARTNUMBER_OFFSET 12 +#define DTM_IDCODE_PARTNUMBER_LENGTH 16 +#define DTM_IDCODE_PARTNUMBER (0xffffU << DTM_IDCODE_PARTNUMBER_OFFSET) +/* + * Identifies the designer/manufacturer of this part. Bits 6:0 must be + * bits 6:0 of the designer/manufacturer's Identification Code as + * assigned by JEDEC Standard JEP106. Bits 10:7 contain the modulo-16 + * count of the number of continuation characters (0x7f) in that same + * Identification Code. + */ +#define DTM_IDCODE_MANUFID_OFFSET 1 +#define DTM_IDCODE_MANUFID_LENGTH 11 +#define DTM_IDCODE_MANUFID (0x7ffU << DTM_IDCODE_MANUFID_OFFSET) +#define DTM_IDCODE_1_OFFSET 0 +#define DTM_IDCODE_1_LENGTH 1 +#define DTM_IDCODE_1 (0x1U << DTM_IDCODE_1_OFFSET) +#define DTM_DTMCS 0x10 +/* + * Writing 1 to this bit does a hard reset of the DTM, + * causing the DTM to forget about any outstanding DMI transactions, and + * returning all registers and internal state to their reset value. + * In general this should only be used when the Debugger has + * reason to expect that the outstanding DMI transaction will never + * complete (e.g. a reset condition caused an inflight DMI transaction to + * be cancelled). + */ +#define DTM_DTMCS_DMIHARDRESET_OFFSET 17 +#define DTM_DTMCS_DMIHARDRESET_LENGTH 1 +#define DTM_DTMCS_DMIHARDRESET (0x1U << DTM_DTMCS_DMIHARDRESET_OFFSET) +/* + * Writing 1 to this bit clears the sticky error state, but does + * not affect outstanding DMI transactions. + */ +#define DTM_DTMCS_DMIRESET_OFFSET 16 +#define DTM_DTMCS_DMIRESET_LENGTH 1 +#define DTM_DTMCS_DMIRESET (0x1U << DTM_DTMCS_DMIRESET_OFFSET) +/* + * This is a hint to the debugger of the minimum number of + * cycles a debugger should spend in + * Run-Test/Idle after every DMI scan to avoid a `busy' + * return code (\FdtmDtmcsDmistat of 3). A debugger must still + * check \FdtmDtmcsDmistat when necessary. + * + * 0: It is not necessary to enter Run-Test/Idle at all. + * + * 1: Enter Run-Test/Idle and leave it immediately. + * + * 2: Enter Run-Test/Idle and stay there for 1 cycle before leaving. + * + * And so on. + */ +#define DTM_DTMCS_IDLE_OFFSET 12 +#define DTM_DTMCS_IDLE_LENGTH 3 +#define DTM_DTMCS_IDLE (0x7U << DTM_DTMCS_IDLE_OFFSET) +/* + * 0: No error. + * + * 1: Reserved. Interpret the same as 2. + * + * 2: An operation failed (resulted in \FdtmDmiOp of 2). + * + * 3: An operation was attempted while a DMI access was still in + * progress (resulted in \FdtmDmiOp of 3). + */ +#define DTM_DTMCS_DMISTAT_OFFSET 10 +#define DTM_DTMCS_DMISTAT_LENGTH 2 +#define DTM_DTMCS_DMISTAT (0x3U << DTM_DTMCS_DMISTAT_OFFSET) +/* + * The size of \FdmSbaddressZeroAddress in \RdtmDmi. + */ +#define DTM_DTMCS_ABITS_OFFSET 4 +#define DTM_DTMCS_ABITS_LENGTH 6 +#define DTM_DTMCS_ABITS (0x3fU << DTM_DTMCS_ABITS_OFFSET) +/* + * 0: Version described in spec version 0.11. + * + * 1: Version described in spec versions 0.13 and 1.0. + * + * 15: Version not described in any available version of this spec. + */ +#define DTM_DTMCS_VERSION_OFFSET 0 +#define DTM_DTMCS_VERSION_LENGTH 4 +#define DTM_DTMCS_VERSION (0xfU << DTM_DTMCS_VERSION_OFFSET) +#define DTM_DMI 0x11 +/* + * Address used for DMI access. In Update-DR this value is used + * to access the DM over the DMI. 
+ */ +#define DTM_DMI_ADDRESS_OFFSET 34 +#define DTM_DMI_ADDRESS_LENGTH abits +#define DTM_DMI_ADDRESS (((1L << abits) - 1) << DTM_DMI_ADDRESS_OFFSET) +/* + * The data to send to the DM over the DMI during Update-DR, and + * the data returned from the DM as a result of the previous operation. + */ +#define DTM_DMI_DATA_OFFSET 2 +#define DTM_DMI_DATA_LENGTH 32 +#define DTM_DMI_DATA (0xffffffffULL << DTM_DMI_DATA_OFFSET) +/* + * When the debugger writes this field, it has the following meaning: + * + * 0: Ignore \FdmSbdataZeroData and \FdmSbaddressZeroAddress. (nop) + * + * Don't send anything over the DMI during Update-DR. + * This operation should never result in a busy or error response. + * The address and data reported in the following Capture-DR + * are undefined. + * + * 1: Read from \FdmSbaddressZeroAddress. (read) + * + * 2: Write \FdmSbdataZeroData to \FdmSbaddressZeroAddress. (write) + * + * 3: Reserved. + * + * When the debugger reads this field, it means the following: + * + * 0: The previous operation completed successfully. + * + * 1: Reserved. + * + * 2: A previous operation failed. The data scanned into \RdtmDmi in + * this access will be ignored. This status is sticky and can be + * cleared by writing \FdtmDtmcsDmireset in \RdtmDtmcs. + * + * This indicates that the DM itself responded with an error. + * There are no specified cases in which the DM would + * respond with an error, and DMI is not required to support + * returning errors. + * + * 3: An operation was attempted while a DMI request is still in + * progress. The data scanned into \RdtmDmi in this access will be + * ignored. This status is sticky and can be cleared by writing + * \FdtmDtmcsDmireset in \RdtmDtmcs. If a debugger sees this status, it + * needs to give the target more TCK edges between Update-DR and + * Capture-DR. The simplest way to do that is to add extra transitions + * in Run-Test/Idle. + */ +#define DTM_DMI_OP_OFFSET 0 +#define DTM_DMI_OP_LENGTH 2 +#define DTM_DMI_OP (0x3ULL << DTM_DMI_OP_OFFSET) +#define CSR_DCSR 0x7b0 +/* + * 0: There is no debug support. + * + * 4: Debug support exists as it is described in this document. + * + * 15: There is debug support, but it does not conform to any + * available version of this spec. + */ +#define CSR_DCSR_DEBUGVER_OFFSET 28 +#define CSR_DCSR_DEBUGVER_LENGTH 4 +#define CSR_DCSR_DEBUGVER (0xfU << CSR_DCSR_DEBUGVER_OFFSET) +/* + * 0: {\tt ebreak} instructions in VS-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in VS-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support virtualization mode. + */ +#define CSR_DCSR_EBREAKVS_OFFSET 17 +#define CSR_DCSR_EBREAKVS_LENGTH 1 +#define CSR_DCSR_EBREAKVS (0x1U << CSR_DCSR_EBREAKVS_OFFSET) +/* + * 0: {\tt ebreak} instructions in VU-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in VU-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support virtualization mode. + */ +#define CSR_DCSR_EBREAKVU_OFFSET 16 +#define CSR_DCSR_EBREAKVU_LENGTH 1 +#define CSR_DCSR_EBREAKVU (0x1U << CSR_DCSR_EBREAKVU_OFFSET) +/* + * 0: {\tt ebreak} instructions in M-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in M-mode enter Debug Mode. 
+ */ +#define CSR_DCSR_EBREAKM_OFFSET 15 +#define CSR_DCSR_EBREAKM_LENGTH 1 +#define CSR_DCSR_EBREAKM (0x1U << CSR_DCSR_EBREAKM_OFFSET) +/* + * 0: {\tt ebreak} instructions in S-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in S-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support S-mode. + */ +#define CSR_DCSR_EBREAKS_OFFSET 13 +#define CSR_DCSR_EBREAKS_LENGTH 1 +#define CSR_DCSR_EBREAKS (0x1U << CSR_DCSR_EBREAKS_OFFSET) +/* + * 0: {\tt ebreak} instructions in U-mode behave as described in the + * Privileged Spec. + * + * 1: {\tt ebreak} instructions in U-mode enter Debug Mode. + * + * This bit is hardwired to 0 if the hart does not support U-mode. + */ +#define CSR_DCSR_EBREAKU_OFFSET 12 +#define CSR_DCSR_EBREAKU_LENGTH 1 +#define CSR_DCSR_EBREAKU (0x1U << CSR_DCSR_EBREAKU_OFFSET) +/* + * 0: Interrupts (including NMI) are disabled during single stepping. + * + * 1: Interrupts (including NMI) are enabled during single stepping. + * + * Implementations may hard wire this bit to 0. + * In that case interrupt behavior can be emulated by the debugger. + * + * The debugger must not change the value of this bit while the hart + * is running. + */ +#define CSR_DCSR_STEPIE_OFFSET 11 +#define CSR_DCSR_STEPIE_LENGTH 1 +#define CSR_DCSR_STEPIE (0x1U << CSR_DCSR_STEPIE_OFFSET) +/* + * 0: Increment counters as usual. + * + * 1: Don't increment any hart-local counters while in Debug Mode or + * on {\tt ebreak} instructions that cause entry into Debug Mode. + * These counters include the {\tt instret} CSR. On single-hart cores + * {\tt cycle} should be stopped, but on multi-hart cores it must keep + * incrementing. + * + * An implementation may hardwire this bit to 0 or 1. + */ +#define CSR_DCSR_STOPCOUNT_OFFSET 10 +#define CSR_DCSR_STOPCOUNT_LENGTH 1 +#define CSR_DCSR_STOPCOUNT (0x1U << CSR_DCSR_STOPCOUNT_OFFSET) +/* + * 0: Increment \Rtime as usual. + * + * 1: Don't increment \Rtime while in Debug Mode. If all harts + * have \FcsrDcsrStoptime=1 and are in Debug Mode then \Rmtime + * is also allowed to stop incrementing. + * + * An implementation may hardwire this bit to 0 or 1. + */ +#define CSR_DCSR_STOPTIME_OFFSET 9 +#define CSR_DCSR_STOPTIME_LENGTH 1 +#define CSR_DCSR_STOPTIME (0x1U << CSR_DCSR_STOPTIME_OFFSET) +/* + * Explains why Debug Mode was entered. + * + * When there are multiple reasons to enter Debug Mode in a single + * cycle, hardware should set \FcsrDcsrCause to the cause with the highest + * priority. + * + * 1: An {\tt ebreak} instruction was executed. (priority 3) + * + * 2: A Trigger Module trigger fired with action=1. (priority 4) + * + * 3: The debugger requested entry to Debug Mode using \FdmDmcontrolHaltreq. + * (priority 1) + * + * 4: The hart single stepped because \FcsrDcsrStep was set. (priority 0, lowest) + * + * 5: The hart halted directly out of reset due to \Fresethaltreq. It + * is also acceptable to report 3 when this happens. (priority 2) + * + * 6: The hart halted because it's part of a halt group. (priority 5, + * highest) Harts may report 3 for this cause instead. + * + * Other values are reserved for future use. + */ +#define CSR_DCSR_CAUSE_OFFSET 6 +#define CSR_DCSR_CAUSE_LENGTH 3 +#define CSR_DCSR_CAUSE (0x7U << CSR_DCSR_CAUSE_OFFSET) +/* + * Extends the prv field with the virtualization mode the hart was operating + * in when Debug Mode was entered. The encoding is described in Table + * \ref{tab:privmode}. 
+ * A debugger can change this value to change the hart's virtualization mode + * when exiting Debug Mode. + * This bit is hardwired to 0 on harts that do not support virtualization mode. + */ +#define CSR_DCSR_V_OFFSET 5 +#define CSR_DCSR_V_LENGTH 1 +#define CSR_DCSR_V (0x1U << CSR_DCSR_V_OFFSET) +/* + * 0: \FcsrMstatusMprv in \Rmstatus is ignored in Debug Mode. + * + * 1: \FcsrMstatusMprv in \Rmstatus takes effect in Debug Mode. + * + * Implementing this bit is optional. It may be tied to either 0 or 1. + */ +#define CSR_DCSR_MPRVEN_OFFSET 4 +#define CSR_DCSR_MPRVEN_LENGTH 1 +#define CSR_DCSR_MPRVEN (0x1U << CSR_DCSR_MPRVEN_OFFSET) +/* + * When set, there is a Non-Maskable-Interrupt (NMI) pending for the hart. + * + * Since an NMI can indicate a hardware error condition, + * reliable debugging may no longer be possible once this bit becomes set. + * This is implementation-dependent. + */ +#define CSR_DCSR_NMIP_OFFSET 3 +#define CSR_DCSR_NMIP_LENGTH 1 +#define CSR_DCSR_NMIP (0x1U << CSR_DCSR_NMIP_OFFSET) +/* + * When set and not in Debug Mode, the hart will only execute a single + * instruction and then enter Debug Mode. See Section~\ref{stepBit} + * for details. + * + * The debugger must not change the value of this bit while the hart + * is running. + */ +#define CSR_DCSR_STEP_OFFSET 2 +#define CSR_DCSR_STEP_LENGTH 1 +#define CSR_DCSR_STEP (0x1U << CSR_DCSR_STEP_OFFSET) +/* + * Contains the privilege mode the hart was operating in when Debug + * Mode was entered. The encoding is described in Table + * \ref{tab:privmode}. A debugger can change this value to change + * the hart's privilege mode when exiting Debug Mode. + * + * Not all privilege modes are supported on all harts. If the + * encoding written is not supported or the debugger is not allowed to + * change to it, the hart may change to any supported privilege mode. + */ +#define CSR_DCSR_PRV_OFFSET 0 +#define CSR_DCSR_PRV_LENGTH 2 +#define CSR_DCSR_PRV (0x3U << CSR_DCSR_PRV_OFFSET) +#define CSR_DPC 0x7b1 +#define CSR_DPC_DPC_OFFSET 0 +#define CSR_DPC_DPC_LENGTH DXLEN +#define CSR_DPC_DPC (((1L << DXLEN) - 1) << CSR_DPC_DPC_OFFSET) +#define CSR_DSCRATCH0 0x7b2 +#define CSR_DSCRATCH1 0x7b3 +#define CSR_TSELECT 0x7a0 +#define CSR_TSELECT_INDEX_OFFSET 0 +#define CSR_TSELECT_INDEX_LENGTH XLEN +#define CSR_TSELECT_INDEX (((1L << XLEN) - 1) << CSR_TSELECT_INDEX_OFFSET) +#define CSR_TDATA1 0x7a1 +/* + * 0: There is no trigger at this \RcsrTselect. + * + * 1: The trigger is a legacy SiFive address match trigger. These + * should not be implemented and aren't further documented here. + * + * 2: The trigger is an address/data match trigger. The remaining bits + * in this register act as described in \RcsrMcontrol. + * + * 3: The trigger is an instruction count trigger. The remaining bits + * in this register act as described in \RcsrIcount. + * + * 4: The trigger is an interrupt trigger. The remaining bits + * in this register act as described in \RcsrItrigger. + * + * 5: The trigger is an exception trigger. The remaining bits + * in this register act as described in \RcsrEtrigger. + * + * 6: The trigger is an address/data match trigger. The remaining bits + * in this register act as described in \RcsrMcontrolSix. This is similar + * to a type 2 trigger, but provides additional functionality and + * should be used instead of type 2 in newer implementations. + * + * 7: The trigger is a trigger source external to the TM. The + * remaining bits in this register act as described in \RcsrTmexttrigger. 
+ * + * 12--14: These trigger types are available for non-standard use. + * + * 15: This trigger exists (so enumeration shouldn't terminate), but + * is not currently available. + * + * Other values are reserved for future use. + */ +#define CSR_TDATA1_TYPE_OFFSET (XLEN-4) +#define CSR_TDATA1_TYPE_LENGTH 4 +#define CSR_TDATA1_TYPE (0xfULL << CSR_TDATA1_TYPE_OFFSET) +/* + * If \FcsrTdataOneType is 0, then this bit is hard-wired to 0. + * + * 0: Both Debug and M-mode can write the {\tt tdata} registers at the + * selected \RcsrTselect. + * + * 1: Only Debug Mode can write the {\tt tdata} registers at the + * selected \RcsrTselect. Writes from other modes are ignored. + * + * This bit is only writable from Debug Mode. + * In ordinary use, external debuggers will always set this bit when + * configuring a trigger. + * When clearing this bit, debuggers should also set the action field + * (whose location depends on \FcsrTdataOneType) to something other + * than 1. + */ +#define CSR_TDATA1_DMODE_OFFSET (XLEN-5) +#define CSR_TDATA1_DMODE_LENGTH 1 +#define CSR_TDATA1_DMODE (0x1ULL << CSR_TDATA1_DMODE_OFFSET) +/* + * If \FcsrTdataOneType is 0, then this field is hard-wired to 0. + * + * Trigger-specific data. + */ +#define CSR_TDATA1_DATA_OFFSET 0 +#define CSR_TDATA1_DATA_LENGTH (XLEN - 5) +#define CSR_TDATA1_DATA (((1L << XLEN - 5) - 1) << CSR_TDATA1_DATA_OFFSET) +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA2_DATA_OFFSET 0 +#define CSR_TDATA2_DATA_LENGTH XLEN +#define CSR_TDATA2_DATA (((1L << XLEN) - 1) << CSR_TDATA2_DATA_OFFSET) +#define CSR_TDATA3 0x7a3 +#define CSR_TDATA3_DATA_OFFSET 0 +#define CSR_TDATA3_DATA_LENGTH XLEN +#define CSR_TDATA3_DATA (((1L << XLEN) - 1) << CSR_TDATA3_DATA_OFFSET) +#define CSR_TINFO 0x7a4 +/* + * One bit for each possible \FcsrTdataOneType enumerated in \RcsrTdataOne. Bit N + * corresponds to type N. If the bit is set, then that type is + * supported by the currently selected trigger. + * + * If the currently selected trigger doesn't exist, this field + * contains 1. + */ +#define CSR_TINFO_INFO_OFFSET 0 +#define CSR_TINFO_INFO_LENGTH 16 +#define CSR_TINFO_INFO (0xffffULL << CSR_TINFO_INFO_OFFSET) +#define CSR_TCONTROL 0x7a5 +/* + * M-mode previous trigger enable field. + * + * \FcsrTcontrolMpte and \FcsrTcontrolMte provide one solution to a problem + * regarding triggers with action=0 firing in M-mode trap handlers. See + * Section~\ref{sec:nativetrigger} for more details. + * + * When a trap into M-mode is taken, \FcsrTcontrolMpte is set to the value of + * \FcsrTcontrolMte. + */ +#define CSR_TCONTROL_MPTE_OFFSET 7 +#define CSR_TCONTROL_MPTE_LENGTH 1 +#define CSR_TCONTROL_MPTE (0x1ULL << CSR_TCONTROL_MPTE_OFFSET) +/* + * M-mode trigger enable field. + * + * 0: Triggers with action=0 do not match/fire while the hart is in M-mode. + * + * 1: Triggers do match/fire while the hart is in M-mode. + * + * When a trap into M-mode is taken, \FcsrTcontrolMte is set to 0. When {\tt + * mret} is executed, \FcsrTcontrolMte is set to the value of \FcsrTcontrolMpte. + */ +#define CSR_TCONTROL_MTE_OFFSET 3 +#define CSR_TCONTROL_MTE_LENGTH 1 +#define CSR_TCONTROL_MTE (0x1ULL << CSR_TCONTROL_MTE_OFFSET) +#define CSR_HCONTEXT 0x6a8 +/* + * Hypervisor mode software can write a context number to this register, + * which can be used to set triggers that only fire in that specific + * context. + * + * An implementation may tie any number of upper bits in this field to + * 0. 
If the H extension is not implemented, it's recommended to implement + * no more than 6 bits on RV32 and 13 on RV64 (as visible through the + * \RcsrMcontext register). If the H extension is implemented, + * it's recommended to implement no more than 7 bits on RV32 + * and 14 on RV64. + */ +#define CSR_HCONTEXT_HCONTEXT_OFFSET 0 +#define CSR_HCONTEXT_HCONTEXT_LENGTH XLEN +#define CSR_HCONTEXT_HCONTEXT (((1L << XLEN) - 1) << CSR_HCONTEXT_HCONTEXT_OFFSET) +#define CSR_SCONTEXT 0x5a8 +/* + * Supervisor mode software can write a context number to this + * register, which can be used to set triggers that only fire in that + * specific context. + * + * An implementation may tie any number of high bits in this field to + * 0. It's recommended to implement no more than 16 bits on RV32, and + * 34 on RV64. + */ +#define CSR_SCONTEXT_DATA_OFFSET 0 +#define CSR_SCONTEXT_DATA_LENGTH XLEN +#define CSR_SCONTEXT_DATA (((1L << XLEN) - 1) << CSR_SCONTEXT_DATA_OFFSET) +#define CSR_MCONTEXT 0x7a8 +#define CSR_MSCONTEXT 0x7aa +#define CSR_MCONTROL 0x7a1 +#define CSR_MCONTROL_TYPE_OFFSET (XLEN-4) +#define CSR_MCONTROL_TYPE_LENGTH 4 +#define CSR_MCONTROL_TYPE (0xfULL << CSR_MCONTROL_TYPE_OFFSET) +#define CSR_MCONTROL_DMODE_OFFSET (XLEN-5) +#define CSR_MCONTROL_DMODE_LENGTH 1 +#define CSR_MCONTROL_DMODE (0x1ULL << CSR_MCONTROL_DMODE_OFFSET) +/* + * Specifies the largest naturally aligned powers-of-two (NAPOT) range + * supported by the hardware when \FcsrMcontrolMatch is 1. The value is the + * logarithm base 2 of the number of bytes in that range. + * A value of 0 indicates \FcsrMcontrolMatch 1 is not supported. + * A value of 63 corresponds to the maximum NAPOT range, which is + * $2^{63}$ bytes in size. + */ +#define CSR_MCONTROL_MASKMAX_OFFSET (XLEN-11) +#define CSR_MCONTROL_MASKMAX_LENGTH 6 +#define CSR_MCONTROL_MASKMAX (0x3fULL << CSR_MCONTROL_MASKMAX_OFFSET) +/* + * This field only exists when XLEN is at least 64. + * It contains the 2 high bits of the access size. The low bits + * come from \FcsrMcontrolSizelo. See \FcsrMcontrolSizelo for how this + * is used. + */ +#define CSR_MCONTROL_SIZEHI_OFFSET 21 +#define CSR_MCONTROL_SIZEHI_LENGTH 2 +#define CSR_MCONTROL_SIZEHI (0x3ULL << CSR_MCONTROL_SIZEHI_OFFSET) +/* + * If this bit is implemented then it must become set when this + * trigger fires and may become set when this trigger matches. + * The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_MCONTROL_HIT_OFFSET 20 +#define CSR_MCONTROL_HIT_LENGTH 1 +#define CSR_MCONTROL_HIT (0x1ULL << CSR_MCONTROL_HIT_OFFSET) +/* + * This bit determines the contents of the XLEN-bit compare values. + * + * 0: There is at least one compare value and it contains the lowest + * virtual address of the access. + * It is recommended that there are additional compare values for + * the other accessed virtual addresses. + * (E.g. on a 32-bit read from 0x4000, the lowest address is 0x4000 + * and the other addresses are 0x4001, 0x4002, and 0x4003.) + * + * 1: There is exactly one compare value and it contains the data + * value loaded or stored, or the instruction executed. + * Any bits beyond the size of the data access will contain 0. 
+ */ +#define CSR_MCONTROL_SELECT_OFFSET 19 +#define CSR_MCONTROL_SELECT_LENGTH 1 +#define CSR_MCONTROL_SELECT (0x1ULL << CSR_MCONTROL_SELECT_OFFSET) +/* + * 0: The action for this trigger will be taken just before the + * instruction that triggered it is committed, but after all preceding + * instructions are committed. \Rxepc or \RcsrDpc (depending + * on \FcsrMcontrolAction) must be set to the virtual address of the + * instruction that matched. + * + * If this is combined with \FcsrMcontrolLoad and + * \FcsrMcontrolSelect=1 then a memory access will be + * performed (including any side effects of performing such an access) even + * though the load will not update its destination register. Debuggers + * should consider this when setting such breakpoints on, for example, + * memory-mapped I/O addresses. + * + * 1: The action for this trigger will be taken after the instruction + * that triggered it is committed. It should be taken before the next + * instruction is committed, but it is better to implement triggers imprecisely + * than to not implement them at all. \Rxepc or + * \RcsrDpc (depending on \FcsrMcontrolAction) must be set to + * the virtual address of the next instruction that must be executed to + * preserve the program flow. + * + * Most hardware will only implement one timing or the other, possibly + * dependent on \FcsrMcontrolSelect, \FcsrMcontrolExecute, + * \FcsrMcontrolLoad, and \FcsrMcontrolStore. This bit + * primarily exists for the hardware to communicate to the debugger + * what will happen. Hardware may implement the bit fully writable, in + * which case the debugger has a little more control. + * + * Data load triggers with \FcsrMcontrolTiming of 0 will result in the same load + * happening again when the debugger lets the hart run. For data load + * triggers, debuggers must first attempt to set the breakpoint with + * \FcsrMcontrolTiming of 1. + * + * If a trigger with \FcsrMcontrolTiming of 0 matches, it is + * implementation-dependent whether that prevents a trigger with + * \FcsrMcontrolTiming of 1 matching as well. + */ +#define CSR_MCONTROL_TIMING_OFFSET 18 +#define CSR_MCONTROL_TIMING_LENGTH 1 +#define CSR_MCONTROL_TIMING (0x1ULL << CSR_MCONTROL_TIMING_OFFSET) +/* + * This field contains the 2 low bits of the access size. The high bits come + * from \FcsrMcontrolSizehi. The combined value is interpreted as follows: + * + * 0: The trigger will attempt to match against an access of any size. + * The behavior is only well-defined if $|select|=0$, or if the access + * size is XLEN. + * + * 1: The trigger will only match against 8-bit memory accesses. + * + * 2: The trigger will only match against 16-bit memory accesses or + * execution of 16-bit instructions. + * + * 3: The trigger will only match against 32-bit memory accesses or + * execution of 32-bit instructions. + * + * 4: The trigger will only match against execution of 48-bit instructions. + * + * 5: The trigger will only match against 64-bit memory accesses or + * execution of 64-bit instructions. + * + * 6: The trigger will only match against execution of 80-bit instructions. + * + * 7: The trigger will only match against execution of 96-bit instructions. + * + * 8: The trigger will only match against execution of 112-bit instructions. + * + * 9: The trigger will only match against 128-bit memory accesses or + * execution of 128-bit instructions. + * + * An implementation must support the value of 0, but all other values + * are optional. 
When an implementation supports address triggers + * (\FcsrMcontrolSelect=0), it is recommended that those triggers + * support every access size that the hart supports, as well as for + * every instruction size that the hart supports. + * + * Implementations such as RV32D or RV64V are able to perform loads + * and stores that are wider than XLEN. Custom extensions may also + * support instructions that are wider than XLEN. Because + * \RcsrTdataTwo is of size XLEN, there is a known limitation that + * data value triggers (\FcsrMcontrolSelect=1) can only be supported + * for access sizes up to XLEN bits. When an implementation supports + * data value triggers (\FcsrMcontrolSelect=1), it is recommended + * that those triggers support every access size up to XLEN that the + * hart supports, as well as for every instruction length up to XLEN + * that the hart supports. + */ +#define CSR_MCONTROL_SIZELO_OFFSET 16 +#define CSR_MCONTROL_SIZELO_LENGTH 2 +#define CSR_MCONTROL_SIZELO (0x3ULL << CSR_MCONTROL_SIZELO_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_MCONTROL_ACTION_OFFSET 12 +#define CSR_MCONTROL_ACTION_LENGTH 4 +#define CSR_MCONTROL_ACTION (0xfULL << CSR_MCONTROL_ACTION_OFFSET) +/* + * 0: When this trigger matches, the configured action is taken. + * + * 1: While this trigger does not match, it prevents the trigger with + * the next index from matching. + * + * A trigger chain starts on the first trigger with $|chain|=1$ after + * a trigger with $|chain|=0$, or simply on the first trigger if that + * has $|chain|=1$. It ends on the first trigger after that which has + * $|chain|=0$. This final trigger is part of the chain. The action + * on all but the final trigger is ignored. The action on that final + * trigger will be taken if and only if all the triggers in the chain + * match at the same time. + * + * Debuggers should not terminate a chain with a trigger with a + * different type. It is undefined when exactly such a chain fires. + * + * Because \FcsrMcontrolChain affects the next trigger, hardware must zero it in + * writes to \RcsrMcontrol that set \FcsrTdataOneDmode to 0 if the next trigger has + * \FcsrTdataOneDmode of 1. + * In addition hardware should ignore writes to \RcsrMcontrol that set + * \FcsrTdataOneDmode to 1 if the previous trigger has both \FcsrTdataOneDmode of 0 and + * \FcsrMcontrolChain of 1. Debuggers must avoid the latter case by checking + * \FcsrMcontrolChain on the previous trigger if they're writing \RcsrMcontrol. + * + * Implementations that wish to limit the maximum length of a trigger + * chain (eg. to meet timing requirements) may do so by zeroing + * \FcsrMcontrolChain in writes to \RcsrMcontrol that would make the chain too long. + */ +#define CSR_MCONTROL_CHAIN_OFFSET 11 +#define CSR_MCONTROL_CHAIN_LENGTH 1 +#define CSR_MCONTROL_CHAIN (0x1ULL << CSR_MCONTROL_CHAIN_OFFSET) +/* + * 0: Matches when any compare value equals \RcsrTdataTwo. + * + * 1: Matches when the top $M$ bits of any compare value match the top + * $M$ bits of \RcsrTdataTwo. + * $M$ is $|XLEN|-1$ minus the index of the least-significant + * bit containing 0 in \RcsrTdataTwo. Debuggers should only write values + * to \RcsrTdataTwo such that $M + $\FcsrMcontrolMaskmax$ \geq |XLEN|$ + * and $M\gt0$ , otherwise it's undefined on what conditions the + * trigger will match. + * + * 2: Matches when any compare value is greater than (unsigned) or + * equal to \RcsrTdataTwo. 
+ * + * 3: Matches when any compare value is less than (unsigned) + * \RcsrTdataTwo. + * + * 4: Matches when $\frac{|XLEN|}{2}-1$:$0$ of any compare value + * equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after + * $\frac{|XLEN|}{2}-1$:$0$ of the compare value is ANDed with + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo. + * + * 5: Matches when $|XLEN|-1$:$\frac{|XLEN|}{2}$ of any compare + * value equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of the compare value is ANDed with + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo. + * + * 8: Matches when \FcsrMcontrolMatch$=0$ would not match. + * + * 9: Matches when \FcsrMcontrolMatch$=1$ would not match. + * + * 12: Matches when \FcsrMcontrolMatch$=4$ would not match. + * + * 13: Matches when \FcsrMcontrolMatch$=5$ would not match. + * + * Other values are reserved for future use. + * + * All comparisons only look at the lower XLEN (in the current mode) + * bits of the compare values and of \RcsrTdataTwo. + * When \FcsrMcontrolSelect=1 and access size is N, this is further + * reduced, and comparisons only look at the lower N bits of the + * compare values and of \RcsrTdataTwo. + */ +#define CSR_MCONTROL_MATCH_OFFSET 7 +#define CSR_MCONTROL_MATCH_LENGTH 4 +#define CSR_MCONTROL_MATCH (0xfULL << CSR_MCONTROL_MATCH_OFFSET) +/* + * When set, enable this trigger in M-mode. + */ +#define CSR_MCONTROL_M_OFFSET 6 +#define CSR_MCONTROL_M_LENGTH 1 +#define CSR_MCONTROL_M (0x1ULL << CSR_MCONTROL_M_OFFSET) +/* + * When set, enable this trigger in S/HS-mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_MCONTROL_S_OFFSET 4 +#define CSR_MCONTROL_S_LENGTH 1 +#define CSR_MCONTROL_S (0x1ULL << CSR_MCONTROL_S_OFFSET) +/* + * When set, enable this trigger in U-mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_MCONTROL_U_OFFSET 3 +#define CSR_MCONTROL_U_LENGTH 1 +#define CSR_MCONTROL_U (0x1ULL << CSR_MCONTROL_U_OFFSET) +/* + * When set, the trigger fires on the virtual address or opcode of an + * instruction that is executed. + */ +#define CSR_MCONTROL_EXECUTE_OFFSET 2 +#define CSR_MCONTROL_EXECUTE_LENGTH 1 +#define CSR_MCONTROL_EXECUTE (0x1ULL << CSR_MCONTROL_EXECUTE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * store. + */ +#define CSR_MCONTROL_STORE_OFFSET 1 +#define CSR_MCONTROL_STORE_LENGTH 1 +#define CSR_MCONTROL_STORE (0x1ULL << CSR_MCONTROL_STORE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * load. + */ +#define CSR_MCONTROL_LOAD_OFFSET 0 +#define CSR_MCONTROL_LOAD_LENGTH 1 +#define CSR_MCONTROL_LOAD (0x1ULL << CSR_MCONTROL_LOAD_OFFSET) +#define CSR_MCONTROL6 0x7a1 +#define CSR_MCONTROL6_TYPE_OFFSET (XLEN-4) +#define CSR_MCONTROL6_TYPE_LENGTH 4 +#define CSR_MCONTROL6_TYPE (0xfULL << CSR_MCONTROL6_TYPE_OFFSET) +#define CSR_MCONTROL6_DMODE_OFFSET (XLEN-5) +#define CSR_MCONTROL6_DMODE_LENGTH 1 +#define CSR_MCONTROL6_DMODE (0x1ULL << CSR_MCONTROL6_DMODE_OFFSET) +/* + * When set, enable this trigger in VS-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_MCONTROL6_VS_OFFSET 24 +#define CSR_MCONTROL6_VS_LENGTH 1 +#define CSR_MCONTROL6_VS (0x1ULL << CSR_MCONTROL6_VS_OFFSET) +/* + * When set, enable this trigger in VU-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. 
+ */ +#define CSR_MCONTROL6_VU_OFFSET 23 +#define CSR_MCONTROL6_VU_LENGTH 1 +#define CSR_MCONTROL6_VU (0x1ULL << CSR_MCONTROL6_VU_OFFSET) +/* + * If this bit is implemented then it must become set when this + * trigger fires and may become set when this trigger matches. + * The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_MCONTROL6_HIT_OFFSET 22 +#define CSR_MCONTROL6_HIT_LENGTH 1 +#define CSR_MCONTROL6_HIT (0x1ULL << CSR_MCONTROL6_HIT_OFFSET) +/* + * This bit determines the contents of the XLEN-bit compare values. + * + * 0: There is at least one compare value and it contains the lowest + * virtual address of the access. + * In addition, it is recommended that there are additional compare + * values for the other accessed virtual addresses match. + * (E.g. on a 32-bit read from 0x4000, the lowest address is 0x4000 + * and the other addresses are 0x4001, 0x4002, and 0x4003.) + * + * 1: There is exactly one compare value and it contains the data + * value loaded or stored, or the instruction executed. + * Any bits beyond the size of the data access will contain 0. + */ +#define CSR_MCONTROL6_SELECT_OFFSET 21 +#define CSR_MCONTROL6_SELECT_LENGTH 1 +#define CSR_MCONTROL6_SELECT (0x1ULL << CSR_MCONTROL6_SELECT_OFFSET) +/* + * 0: The action for this trigger will be taken just before the + * instruction that triggered it is committed, but after all preceding + * instructions are committed. \Rxepc or \RcsrDpc (depending + * on \FcsrMcontrolSixAction) must be set to the virtual address of the + * instruction that matched. + * + * If this is combined with \FcsrMcontrolSixLoad and + * \FcsrMcontrolSixSelect=1 then a memory access will be + * performed (including any side effects of performing such an access) even + * though the load will not update its destination register. Debuggers + * should consider this when setting such breakpoints on, for example, + * memory-mapped I/O addresses. + * + * 1: The action for this trigger will be taken after the instruction + * that triggered it is committed. It should be taken before the next + * instruction is committed, but it is better to implement triggers imprecisely + * than to not implement them at all. \Rxepc or + * \RcsrDpc (depending on \FcsrMcontrolSixAction) must be set to + * the virtual address of the next instruction that must be executed to + * preserve the program flow. + * + * Most hardware will only implement one timing or the other, possibly + * dependent on \FcsrMcontrolSixSelect, \FcsrMcontrolSixExecute, + * \FcsrMcontrolSixLoad, and \FcsrMcontrolSixStore. This bit + * primarily exists for the hardware to communicate to the debugger + * what will happen. Hardware may implement the bit fully writable, in + * which case the debugger has a little more control. + * + * Data load triggers with \FcsrMcontrolSixTiming of 0 will result in the same load + * happening again when the debugger lets the hart run. For data load + * triggers, debuggers must first attempt to set the breakpoint with + * \FcsrMcontrolSixTiming of 1. + * + * If a trigger with \FcsrMcontrolSixTiming of 0 matches, it is + * implementation-dependent whether that prevents a trigger with + * \FcsrMcontrolSixTiming of 1 matching as well. 
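+ *
+ * (Non-normative example: for a data load trigger a debugger might first
+ * write {\tt tdata1} with CSR_MCONTROL6_TIMING set, read it back, and
+ * only fall back to a timing of 0 if the bit did not retain the value 1.)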
+ */ +#define CSR_MCONTROL6_TIMING_OFFSET 20 +#define CSR_MCONTROL6_TIMING_LENGTH 1 +#define CSR_MCONTROL6_TIMING (0x1ULL << CSR_MCONTROL6_TIMING_OFFSET) +/* + * 0: The trigger will attempt to match against an access of any size. + * The behavior is only well-defined if $|select|=0$, or if the access + * size is XLEN. + * + * 1: The trigger will only match against 8-bit memory accesses. + * + * 2: The trigger will only match against 16-bit memory accesses or + * execution of 16-bit instructions. + * + * 3: The trigger will only match against 32-bit memory accesses or + * execution of 32-bit instructions. + * + * 4: The trigger will only match against execution of 48-bit instructions. + * + * 5: The trigger will only match against 64-bit memory accesses or + * execution of 64-bit instructions. + * + * 6: The trigger will only match against execution of 80-bit instructions. + * + * 7: The trigger will only match against execution of 96-bit instructions. + * + * 8: The trigger will only match against execution of 112-bit instructions. + * + * 9: The trigger will only match against 128-bit memory accesses or + * execution of 128-bit instructions. + * + * An implementation must support the value of 0, but all other values + * are optional. When an implementation supports address triggers + * (\FcsrMcontrolSixSelect=0), it is recommended that those triggers + * support every access size that the hart supports, as well as for + * every instruction size that the hart supports. + * + * Implementations such as RV32D or RV64V are able to perform loads + * and stores that are wider than XLEN. Custom extensions may also + * support instructions that are wider than XLEN. Because + * \RcsrTdataTwo is of size XLEN, there is a known limitation that + * data value triggers (\FcsrMcontrolSixSelect=1) can only be supported + * for access sizes up to XLEN bits. When an implementation supports + * data value triggers (\FcsrMcontrolSixSelect=1), it is recommended + * that those triggers support every access size up to XLEN that the + * hart supports, as well as for every instruction length up to XLEN + * that the hart supports. + */ +#define CSR_MCONTROL6_SIZE_OFFSET 16 +#define CSR_MCONTROL6_SIZE_LENGTH 4 +#define CSR_MCONTROL6_SIZE (0xfULL << CSR_MCONTROL6_SIZE_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_MCONTROL6_ACTION_OFFSET 12 +#define CSR_MCONTROL6_ACTION_LENGTH 4 +#define CSR_MCONTROL6_ACTION (0xfULL << CSR_MCONTROL6_ACTION_OFFSET) +/* + * 0: When this trigger matches, the configured action is taken. + * + * 1: While this trigger does not match, it prevents the trigger with + * the next index from matching. + * + * A trigger chain starts on the first trigger with $|chain|=1$ after + * a trigger with $|chain|=0$, or simply on the first trigger if that + * has $|chain|=1$. It ends on the first trigger after that which has + * $|chain|=0$. This final trigger is part of the chain. The action + * on all but the final trigger is ignored. The action on that final + * trigger will be taken if and only if all the triggers in the chain + * match at the same time. + * + * Debuggers should not terminate a chain with a trigger with a + * different type. It is undefined when exactly such a chain fires. + * + * Because \FcsrMcontrolSixChain affects the next trigger, hardware must zero it in + * writes to \RcsrMcontrolSix that set \FcsrTdataOneDmode to 0 if the next trigger has + * \FcsrTdataOneDmode of 1. 
+ * In addition hardware should ignore writes to \RcsrMcontrolSix that set + * \FcsrTdataOneDmode to 1 if the previous trigger has both \FcsrTdataOneDmode of 0 and + * \FcsrMcontrolSixChain of 1. Debuggers must avoid the latter case by checking + * \FcsrMcontrolSixChain on the previous trigger if they're writing \RcsrMcontrolSix. + * + * Implementations that wish to limit the maximum length of a trigger + * chain (eg. to meet timing requirements) may do so by zeroing + * \FcsrMcontrolSixChain in writes to \RcsrMcontrolSix that would make the chain too long. + */ +#define CSR_MCONTROL6_CHAIN_OFFSET 11 +#define CSR_MCONTROL6_CHAIN_LENGTH 1 +#define CSR_MCONTROL6_CHAIN (0x1ULL << CSR_MCONTROL6_CHAIN_OFFSET) +/* + * 0: Matches when any compare value equals \RcsrTdataTwo. + * + * 1: Matches when the top $M$ bits of any compare value match the top + * $M$ bits of \RcsrTdataTwo. + * $M$ is $|XLEN|-1$ minus the index of the least-significant bit + * containing 0 in \RcsrTdataTwo. + * \RcsrTdataTwo is WARL and bit $|maskmax6|-1$ will be set to 0 if no + * less significant bits are written with 0. + * Legal values for \RcsrTdataTwo require $M + |maskmax6| \geq |XLEN|$ and $M\gt0$. + * See above for how to determine maskmax6. + * + * 2: Matches when any compare value is greater than (unsigned) or + * equal to \RcsrTdataTwo. + * + * 3: Matches when any compare value is less than (unsigned) + * \RcsrTdataTwo. + * + * 4: Matches when $\frac{|XLEN|}{2}-1$:$0$ of any compare value + * equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after + * $\frac{|XLEN|}{2}-1$:$0$ of the compare value is ANDed with + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo. + * + * 5: Matches when $|XLEN|-1$:$\frac{|XLEN|}{2}$ of any compare + * value equals $\frac{|XLEN|}{2}-1$:$0$ of \RcsrTdataTwo after + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of the compare value is ANDed with + * $|XLEN|-1$:$\frac{|XLEN|}{2}$ of \RcsrTdataTwo. + * + * 8: Matches when \FcsrMcontrolSixMatch$=0$ would not match. + * + * 9: Matches when \FcsrMcontrolSixMatch$=1$ would not match. + * + * 12: Matches when \FcsrMcontrolSixMatch$=4$ would not match. + * + * 13: Matches when \FcsrMcontrolSixMatch$=5$ would not match. + * + * Other values are reserved for future use. + * + * All comparisons only look at the lower XLEN (in the current mode) + * bits of the compare values and of \RcsrTdataTwo. + * When \FcsrMcontrolSelect=1 and access size is N, this is further + * reduced, and comparisons only look at the lower N bits of the + * compare values and of \RcsrTdataTwo. + */ +#define CSR_MCONTROL6_MATCH_OFFSET 7 +#define CSR_MCONTROL6_MATCH_LENGTH 4 +#define CSR_MCONTROL6_MATCH (0xfULL << CSR_MCONTROL6_MATCH_OFFSET) +/* + * When set, enable this trigger in M-mode. + */ +#define CSR_MCONTROL6_M_OFFSET 6 +#define CSR_MCONTROL6_M_LENGTH 1 +#define CSR_MCONTROL6_M (0x1ULL << CSR_MCONTROL6_M_OFFSET) +/* + * When set, enable this trigger in S/HS-mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_MCONTROL6_S_OFFSET 4 +#define CSR_MCONTROL6_S_LENGTH 1 +#define CSR_MCONTROL6_S (0x1ULL << CSR_MCONTROL6_S_OFFSET) +/* + * When set, enable this trigger in U-mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_MCONTROL6_U_OFFSET 3 +#define CSR_MCONTROL6_U_LENGTH 1 +#define CSR_MCONTROL6_U (0x1ULL << CSR_MCONTROL6_U_OFFSET) +/* + * When set, the trigger fires on the virtual address or opcode of an + * instruction that is executed. 
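+ *
+ * (Illustrative, non-normative sketch: assuming type 6 selects this
+ * mcontrol6 view, a basic instruction address breakpoint could be set by
+ * writing the target address to \RcsrTdataTwo and then writing
+ * ((0x6ULL << CSR_MCONTROL6_TYPE_OFFSET) | CSR_MCONTROL6_EXECUTE |
+ *  CSR_MCONTROL6_M | CSR_MCONTROL6_S | CSR_MCONTROL6_U)
+ * to {\tt tdata1}, leaving match and action at 0.)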
+ */ +#define CSR_MCONTROL6_EXECUTE_OFFSET 2 +#define CSR_MCONTROL6_EXECUTE_LENGTH 1 +#define CSR_MCONTROL6_EXECUTE (0x1ULL << CSR_MCONTROL6_EXECUTE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * store. + */ +#define CSR_MCONTROL6_STORE_OFFSET 1 +#define CSR_MCONTROL6_STORE_LENGTH 1 +#define CSR_MCONTROL6_STORE (0x1ULL << CSR_MCONTROL6_STORE_OFFSET) +/* + * When set, the trigger fires on the virtual address or data of any + * load. + */ +#define CSR_MCONTROL6_LOAD_OFFSET 0 +#define CSR_MCONTROL6_LOAD_LENGTH 1 +#define CSR_MCONTROL6_LOAD (0x1ULL << CSR_MCONTROL6_LOAD_OFFSET) +#define CSR_ICOUNT 0x7a1 +#define CSR_ICOUNT_TYPE_OFFSET (XLEN-4) +#define CSR_ICOUNT_TYPE_LENGTH 4 +#define CSR_ICOUNT_TYPE (0xfULL << CSR_ICOUNT_TYPE_OFFSET) +#define CSR_ICOUNT_DMODE_OFFSET (XLEN-5) +#define CSR_ICOUNT_DMODE_LENGTH 1 +#define CSR_ICOUNT_DMODE (0x1ULL << CSR_ICOUNT_DMODE_OFFSET) +/* + * When set, enable this trigger in VS-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ICOUNT_VS_OFFSET 26 +#define CSR_ICOUNT_VS_LENGTH 1 +#define CSR_ICOUNT_VS (0x1ULL << CSR_ICOUNT_VS_OFFSET) +/* + * When set, enable this trigger in VU-mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ICOUNT_VU_OFFSET 25 +#define CSR_ICOUNT_VU_LENGTH 1 +#define CSR_ICOUNT_VU (0x1ULL << CSR_ICOUNT_VU_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_ICOUNT_HIT_OFFSET 24 +#define CSR_ICOUNT_HIT_LENGTH 1 +#define CSR_ICOUNT_HIT (0x1ULL << CSR_ICOUNT_HIT_OFFSET) +/* + * When count is decremented to 0, the trigger fires. Instead of + * changing \FcsrIcountCount from 1 to 0, it is also acceptable for hardware to + * clear \FcsrIcountM, \FcsrIcountS, \FcsrIcountU, \FcsrIcountVs, and + * \FcsrIcountVu. This allows \FcsrIcountCount to be hard-wired + * to 1 if this register just exists for single step. + */ +#define CSR_ICOUNT_COUNT_OFFSET 10 +#define CSR_ICOUNT_COUNT_LENGTH 14 +#define CSR_ICOUNT_COUNT (0x3fffULL << CSR_ICOUNT_COUNT_OFFSET) +/* + * When set, enable this trigger in M-mode. + */ +#define CSR_ICOUNT_M_OFFSET 9 +#define CSR_ICOUNT_M_LENGTH 1 +#define CSR_ICOUNT_M (0x1ULL << CSR_ICOUNT_M_OFFSET) +/* + * This bit becomes set when \FcsrIcountCount is decremented from 1 + * to 0. It is cleared when the trigger fires. + */ +#define CSR_ICOUNT_PENDING_OFFSET 8 +#define CSR_ICOUNT_PENDING_LENGTH 1 +#define CSR_ICOUNT_PENDING (0x1ULL << CSR_ICOUNT_PENDING_OFFSET) +/* + * When set, enable this trigger in S/HS-mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_ICOUNT_S_OFFSET 7 +#define CSR_ICOUNT_S_LENGTH 1 +#define CSR_ICOUNT_S (0x1ULL << CSR_ICOUNT_S_OFFSET) +/* + * When set, enable this trigger in U-mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_ICOUNT_U_OFFSET 6 +#define CSR_ICOUNT_U_LENGTH 1 +#define CSR_ICOUNT_U (0x1ULL << CSR_ICOUNT_U_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. 
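+ *
+ * (Non-normative example: the single-step style use mentioned under
+ * \FcsrIcountCount could be requested by writing {\tt tdata1} with,
+ * assuming type 3 selects this icount view,
+ * ((0x3ULL << CSR_ICOUNT_TYPE_OFFSET) | (1ULL << CSR_ICOUNT_COUNT_OFFSET) |
+ *  CSR_ICOUNT_M | CSR_ICOUNT_S | CSR_ICOUNT_U)
+ * ORed with the desired action value shifted into this field.)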
+ */ +#define CSR_ICOUNT_ACTION_OFFSET 0 +#define CSR_ICOUNT_ACTION_LENGTH 6 +#define CSR_ICOUNT_ACTION (0x3fULL << CSR_ICOUNT_ACTION_OFFSET) +#define CSR_ITRIGGER 0x7a1 +#define CSR_ITRIGGER_TYPE_OFFSET (XLEN-4) +#define CSR_ITRIGGER_TYPE_LENGTH 4 +#define CSR_ITRIGGER_TYPE (0xfULL << CSR_ITRIGGER_TYPE_OFFSET) +#define CSR_ITRIGGER_DMODE_OFFSET (XLEN-5) +#define CSR_ITRIGGER_DMODE_LENGTH 1 +#define CSR_ITRIGGER_DMODE (0x1ULL << CSR_ITRIGGER_DMODE_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_ITRIGGER_HIT_OFFSET (XLEN-6) +#define CSR_ITRIGGER_HIT_LENGTH 1 +#define CSR_ITRIGGER_HIT (0x1ULL << CSR_ITRIGGER_HIT_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from VS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ITRIGGER_VS_OFFSET 12 +#define CSR_ITRIGGER_VS_LENGTH 1 +#define CSR_ITRIGGER_VS (0x1ULL << CSR_ITRIGGER_VS_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from VU + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ITRIGGER_VU_OFFSET 11 +#define CSR_ITRIGGER_VU_LENGTH 1 +#define CSR_ITRIGGER_VU (0x1ULL << CSR_ITRIGGER_VU_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from M + * mode. + */ +#define CSR_ITRIGGER_M_OFFSET 9 +#define CSR_ITRIGGER_M_LENGTH 1 +#define CSR_ITRIGGER_M (0x1ULL << CSR_ITRIGGER_M_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from S/HS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_ITRIGGER_S_OFFSET 7 +#define CSR_ITRIGGER_S_LENGTH 1 +#define CSR_ITRIGGER_S (0x1ULL << CSR_ITRIGGER_S_OFFSET) +/* + * When set, enable this trigger for interrupts that are taken from U + * mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_ITRIGGER_U_OFFSET 6 +#define CSR_ITRIGGER_U_LENGTH 1 +#define CSR_ITRIGGER_U (0x1ULL << CSR_ITRIGGER_U_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_ITRIGGER_ACTION_OFFSET 0 +#define CSR_ITRIGGER_ACTION_LENGTH 6 +#define CSR_ITRIGGER_ACTION (0x3fULL << CSR_ITRIGGER_ACTION_OFFSET) +#define CSR_ETRIGGER 0x7a1 +#define CSR_ETRIGGER_TYPE_OFFSET (XLEN-4) +#define CSR_ETRIGGER_TYPE_LENGTH 4 +#define CSR_ETRIGGER_TYPE (0xfULL << CSR_ETRIGGER_TYPE_OFFSET) +#define CSR_ETRIGGER_DMODE_OFFSET (XLEN-5) +#define CSR_ETRIGGER_DMODE_LENGTH 1 +#define CSR_ETRIGGER_DMODE (0x1ULL << CSR_ETRIGGER_DMODE_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_ETRIGGER_HIT_OFFSET (XLEN-6) +#define CSR_ETRIGGER_HIT_LENGTH 1 +#define CSR_ETRIGGER_HIT (0x1ULL << CSR_ETRIGGER_HIT_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from VS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. 
+ */ +#define CSR_ETRIGGER_VS_OFFSET 12 +#define CSR_ETRIGGER_VS_LENGTH 1 +#define CSR_ETRIGGER_VS (0x1ULL << CSR_ETRIGGER_VS_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from VU + * mode. + * This bit is hard-wired to 0 if the hart does not support + * virtualization mode. + */ +#define CSR_ETRIGGER_VU_OFFSET 11 +#define CSR_ETRIGGER_VU_LENGTH 1 +#define CSR_ETRIGGER_VU (0x1ULL << CSR_ETRIGGER_VU_OFFSET) +/* + * When set, non-maskable interrupts cause this + * trigger to fire, regardless of the values of \FcsrEtriggerM, + * \FcsrEtriggerS, \FcsrEtriggerU, \FcsrEtriggerVs, and \FcsrEtriggerVu. + */ +#define CSR_ETRIGGER_NMI_OFFSET 10 +#define CSR_ETRIGGER_NMI_LENGTH 1 +#define CSR_ETRIGGER_NMI (0x1ULL << CSR_ETRIGGER_NMI_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from M + * mode. + */ +#define CSR_ETRIGGER_M_OFFSET 9 +#define CSR_ETRIGGER_M_LENGTH 1 +#define CSR_ETRIGGER_M (0x1ULL << CSR_ETRIGGER_M_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from S/HS + * mode. + * This bit is hard-wired to 0 if the hart does not support + * S-mode. + */ +#define CSR_ETRIGGER_S_OFFSET 7 +#define CSR_ETRIGGER_S_LENGTH 1 +#define CSR_ETRIGGER_S (0x1ULL << CSR_ETRIGGER_S_OFFSET) +/* + * When set, enable this trigger for exceptions that are taken from U + * mode. + * This bit is hard-wired to 0 if the hart does not support + * U-mode. + */ +#define CSR_ETRIGGER_U_OFFSET 6 +#define CSR_ETRIGGER_U_LENGTH 1 +#define CSR_ETRIGGER_U (0x1ULL << CSR_ETRIGGER_U_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_ETRIGGER_ACTION_OFFSET 0 +#define CSR_ETRIGGER_ACTION_LENGTH 6 +#define CSR_ETRIGGER_ACTION (0x3fULL << CSR_ETRIGGER_ACTION_OFFSET) +#define CSR_TMEXTTRIGGER 0x7a1 +#define CSR_TMEXTTRIGGER_TYPE_OFFSET (XLEN-4) +#define CSR_TMEXTTRIGGER_TYPE_LENGTH 4 +#define CSR_TMEXTTRIGGER_TYPE (0xfULL << CSR_TMEXTTRIGGER_TYPE_OFFSET) +#define CSR_TMEXTTRIGGER_DMODE_OFFSET (XLEN-5) +#define CSR_TMEXTTRIGGER_DMODE_LENGTH 1 +#define CSR_TMEXTTRIGGER_DMODE (0x1ULL << CSR_TMEXTTRIGGER_DMODE_OFFSET) +/* + * If this bit is implemented, the hardware sets it when this + * trigger matches. The trigger's user can set or clear it at any + * time. It is used to determine which + * trigger(s) matched. If the bit is not implemented, it is always 0 + * and writing it has no effect. + */ +#define CSR_TMEXTTRIGGER_HIT_OFFSET (XLEN-6) +#define CSR_TMEXTTRIGGER_HIT_LENGTH 1 +#define CSR_TMEXTTRIGGER_HIT (0x1ULL << CSR_TMEXTTRIGGER_HIT_OFFSET) +/* + * This optional bit, when set, causes this trigger to fire whenever an attached + * interrupt controller signals a trigger. + */ +#define CSR_TMEXTTRIGGER_INTCTL_OFFSET 22 +#define CSR_TMEXTTRIGGER_INTCTL_LENGTH 1 +#define CSR_TMEXTTRIGGER_INTCTL (0x1ULL << CSR_TMEXTTRIGGER_INTCTL_OFFSET) +/* + * Selects any combination of up to 16 external debug trigger inputs + * that cause this trigger to fire. + */ +#define CSR_TMEXTTRIGGER_SELECT_OFFSET 6 +#define CSR_TMEXTTRIGGER_SELECT_LENGTH 16 +#define CSR_TMEXTTRIGGER_SELECT (0xffffULL << CSR_TMEXTTRIGGER_SELECT_OFFSET) +/* + * The action to take when the trigger fires. The values are explained + * in Table~\ref{tab:action}. + */ +#define CSR_TMEXTTRIGGER_ACTION_OFFSET 0 +#define CSR_TMEXTTRIGGER_ACTION_LENGTH 6 +#define CSR_TMEXTTRIGGER_ACTION (0x3fULL << CSR_TMEXTTRIGGER_ACTION_OFFSET) +#define CSR_TEXTRA32 0x7a3 +/* + * Data used together with \FcsrTextraThirtytwoMhselect. 
+ */ +#define CSR_TEXTRA32_MHVALUE_OFFSET 26 +#define CSR_TEXTRA32_MHVALUE_LENGTH 6 +#define CSR_TEXTRA32_MHVALUE (0x3fU << CSR_TEXTRA32_MHVALUE_OFFSET) +/* + * 0: Ignore \FcsrTextraThirtytwoMhvalue. + * + * 4: This trigger will only match if the low bits of + * \RcsrMcontext/\RcsrHcontext equal \FcsrTextraThirtytwoMhvalue. + * + * 1, 5: This trigger will only match if the low bits of + * \RcsrMcontext/\RcsrHcontext equal \{\FcsrTextraThirtytwoMhvalue, mhselect[2]\}. + * + * 2, 6: This trigger will only match if VMID in hgatp equals the lower VMIDMAX + * (defined in the Privileged Spec) bits of \{\FcsrTextraThirtytwoMhvalue, mhselect[2]\}. + * + * 3, 7: Reserved. + * + * If the H extension is not supported, the only legal values are 0 and 4. + */ +#define CSR_TEXTRA32_MHSELECT_OFFSET 23 +#define CSR_TEXTRA32_MHSELECT_LENGTH 3 +#define CSR_TEXTRA32_MHSELECT (0x7U << CSR_TEXTRA32_MHSELECT_OFFSET) +/* + * When the least significant bit of this field is 1, it causes bits 7:0 + * in the comparison to be ignored, when \FcsrTextraThirtytwoSselect=1. + * When the next most significant bit of this field is 1, it causes bits 15:8 + * to be ignored in the comparison, when \FcsrTextraThirtytwoSselect=1. + */ +#define CSR_TEXTRA32_SBYTEMASK_OFFSET 18 +#define CSR_TEXTRA32_SBYTEMASK_LENGTH 2 +#define CSR_TEXTRA32_SBYTEMASK (0x3U << CSR_TEXTRA32_SBYTEMASK_OFFSET) +/* + * Data used together with \FcsrTextraThirtytwoSselect. + * + * This field should be tied to 0 when S-mode is not supported. + */ +#define CSR_TEXTRA32_SVALUE_OFFSET 2 +#define CSR_TEXTRA32_SVALUE_LENGTH 16 +#define CSR_TEXTRA32_SVALUE (0xffffU << CSR_TEXTRA32_SVALUE_OFFSET) +/* + * 0: Ignore \FcsrTextraThirtytwoSvalue. + * + * 1: This trigger will only match if the low bits of + * \RcsrScontext equal \FcsrTextraThirtytwoSvalue. + * + * 2: This trigger will only match if: + * \begin{itemize}[noitemsep,nolistsep] + * \item the mode is VS-mode or VU-mode and ASID in \Rvsatp + * equals the lower ASIDMAX (defined in the Privileged Spec) bits + * of \FcsrTextraThirtytwoSvalue. + * \item in all other modes, ASID in \Rsatp equals the lower + * ASIDMAX (defined in the Privileged Spec) bits of + * \FcsrTextraThirtytwoSvalue. + * \end{itemize} + * + * This field should be tied to 0 when S-mode is not supported. + */ +#define CSR_TEXTRA32_SSELECT_OFFSET 0 +#define CSR_TEXTRA32_SSELECT_LENGTH 2 +#define CSR_TEXTRA32_SSELECT (0x3U << CSR_TEXTRA32_SSELECT_OFFSET) +#define CSR_TEXTRA64 0x7a3 +#define CSR_TEXTRA64_MHVALUE_OFFSET 51 +#define CSR_TEXTRA64_MHVALUE_LENGTH 13 +#define CSR_TEXTRA64_MHVALUE (0x1fffULL << CSR_TEXTRA64_MHVALUE_OFFSET) +#define CSR_TEXTRA64_MHSELECT_OFFSET 48 +#define CSR_TEXTRA64_MHSELECT_LENGTH 3 +#define CSR_TEXTRA64_MHSELECT (0x7ULL << CSR_TEXTRA64_MHSELECT_OFFSET) +/* + * When the least significant bit of this field is 1, it causes bits 7:0 + * in the comparison to be ignored, when \FcsrTextraSixtyfourSselect=1. + * Likewise, the second bit controls the comparison of bits 15:8, + * third bit controls the comparison of bits 23:16, + * fourth bit controls the comparison of bits 31:24, and + * fifth bit controls the comparison of bits 33:32. 
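+ *
+ * (For example, a value of 0x3 in this field would cause bits 15:0 in
+ * the comparison to be ignored.)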
+ */ +#define CSR_TEXTRA64_SBYTEMASK_OFFSET 36 +#define CSR_TEXTRA64_SBYTEMASK_LENGTH 5 +#define CSR_TEXTRA64_SBYTEMASK (0x1fULL << CSR_TEXTRA64_SBYTEMASK_OFFSET) +#define CSR_TEXTRA64_SVALUE_OFFSET 2 +#define CSR_TEXTRA64_SVALUE_LENGTH 34 +#define CSR_TEXTRA64_SVALUE (0x3ffffffffULL << CSR_TEXTRA64_SVALUE_OFFSET) +#define CSR_TEXTRA64_SSELECT_OFFSET 0 +#define CSR_TEXTRA64_SSELECT_LENGTH 2 +#define CSR_TEXTRA64_SSELECT (0x3ULL << CSR_TEXTRA64_SSELECT_OFFSET) +#define DM_DMSTATUS 0x11 +/* + * 0: Unimplemented, or \FdmDmcontrolNdmreset is zero and no ndmreset is currently + * in progress. + * + * 1: \FdmDmcontrolNdmreset is currently nonzero, or there is an ndmreset in progress. + */ +#define DM_DMSTATUS_NDMRESETPENDING_OFFSET 24 +#define DM_DMSTATUS_NDMRESETPENDING_LENGTH 1 +#define DM_DMSTATUS_NDMRESETPENDING (0x1U << DM_DMSTATUS_NDMRESETPENDING_OFFSET) +/* + * 0: The per-hart {\tt unavail} bits reflect the current state of the hart. + * + * 1: The per-hart {\tt unavail} bits are sticky. Once they are set, they will + * not clear until the debugger acknowledges them using \FdmDmcontrolAckunavail. + */ +#define DM_DMSTATUS_STICKYUNAVAIL_OFFSET 23 +#define DM_DMSTATUS_STICKYUNAVAIL_LENGTH 1 +#define DM_DMSTATUS_STICKYUNAVAIL (0x1U << DM_DMSTATUS_STICKYUNAVAIL_OFFSET) +/* + * If 1, then there is an implicit {\tt ebreak} instruction at the + * non-existent word immediately after the Program Buffer. This saves + * the debugger from having to write the {\tt ebreak} itself, and + * allows the Program Buffer to be one word smaller. + * + * This must be 1 when \FdmAbstractcsProgbufsize is 1. + */ +#define DM_DMSTATUS_IMPEBREAK_OFFSET 22 +#define DM_DMSTATUS_IMPEBREAK_LENGTH 1 +#define DM_DMSTATUS_IMPEBREAK (0x1U << DM_DMSTATUS_IMPEBREAK_OFFSET) +/* + * This field is 1 when all currently selected harts have been reset + * and reset has not been acknowledged for any of them. + */ +#define DM_DMSTATUS_ALLHAVERESET_OFFSET 19 +#define DM_DMSTATUS_ALLHAVERESET_LENGTH 1 +#define DM_DMSTATUS_ALLHAVERESET (0x1U << DM_DMSTATUS_ALLHAVERESET_OFFSET) +/* + * This field is 1 when at least one currently selected hart has been + * reset and reset has not been acknowledged for that hart. + */ +#define DM_DMSTATUS_ANYHAVERESET_OFFSET 18 +#define DM_DMSTATUS_ANYHAVERESET_LENGTH 1 +#define DM_DMSTATUS_ANYHAVERESET (0x1U << DM_DMSTATUS_ANYHAVERESET_OFFSET) +/* + * This field is 1 when all currently selected harts have their + * resume ack bit\index{resume ack bit} set. + */ +#define DM_DMSTATUS_ALLRESUMEACK_OFFSET 17 +#define DM_DMSTATUS_ALLRESUMEACK_LENGTH 1 +#define DM_DMSTATUS_ALLRESUMEACK (0x1U << DM_DMSTATUS_ALLRESUMEACK_OFFSET) +/* + * This field is 1 when any currently selected hart has its + * resume ack bit\index{resume ack bit} set. + */ +#define DM_DMSTATUS_ANYRESUMEACK_OFFSET 16 +#define DM_DMSTATUS_ANYRESUMEACK_LENGTH 1 +#define DM_DMSTATUS_ANYRESUMEACK (0x1U << DM_DMSTATUS_ANYRESUMEACK_OFFSET) +/* + * This field is 1 when all currently selected harts do not exist in + * this hardware platform. + */ +#define DM_DMSTATUS_ALLNONEXISTENT_OFFSET 15 +#define DM_DMSTATUS_ALLNONEXISTENT_LENGTH 1 +#define DM_DMSTATUS_ALLNONEXISTENT (0x1U << DM_DMSTATUS_ALLNONEXISTENT_OFFSET) +/* + * This field is 1 when any currently selected hart does not exist in + * this hardware platform. 
+ */ +#define DM_DMSTATUS_ANYNONEXISTENT_OFFSET 14 +#define DM_DMSTATUS_ANYNONEXISTENT_LENGTH 1 +#define DM_DMSTATUS_ANYNONEXISTENT (0x1U << DM_DMSTATUS_ANYNONEXISTENT_OFFSET) +/* + * This field is 1 when all currently selected harts are + * unavailable, or (if \FdmDmstatusStickyunavail is 1) were + * unavailable without that being acknowledged. + */ +#define DM_DMSTATUS_ALLUNAVAIL_OFFSET 13 +#define DM_DMSTATUS_ALLUNAVAIL_LENGTH 1 +#define DM_DMSTATUS_ALLUNAVAIL (0x1U << DM_DMSTATUS_ALLUNAVAIL_OFFSET) +/* + * This field is 1 when any currently selected hart is unavailable, + * or (if \FdmDmstatusStickyunavail is 1) was unavailable without + * that being acknowledged. + */ +#define DM_DMSTATUS_ANYUNAVAIL_OFFSET 12 +#define DM_DMSTATUS_ANYUNAVAIL_LENGTH 1 +#define DM_DMSTATUS_ANYUNAVAIL (0x1U << DM_DMSTATUS_ANYUNAVAIL_OFFSET) +/* + * This field is 1 when all currently selected harts are running. + */ +#define DM_DMSTATUS_ALLRUNNING_OFFSET 11 +#define DM_DMSTATUS_ALLRUNNING_LENGTH 1 +#define DM_DMSTATUS_ALLRUNNING (0x1U << DM_DMSTATUS_ALLRUNNING_OFFSET) +/* + * This field is 1 when any currently selected hart is running. + */ +#define DM_DMSTATUS_ANYRUNNING_OFFSET 10 +#define DM_DMSTATUS_ANYRUNNING_LENGTH 1 +#define DM_DMSTATUS_ANYRUNNING (0x1U << DM_DMSTATUS_ANYRUNNING_OFFSET) +/* + * This field is 1 when all currently selected harts are halted. + */ +#define DM_DMSTATUS_ALLHALTED_OFFSET 9 +#define DM_DMSTATUS_ALLHALTED_LENGTH 1 +#define DM_DMSTATUS_ALLHALTED (0x1U << DM_DMSTATUS_ALLHALTED_OFFSET) +/* + * This field is 1 when any currently selected hart is halted. + */ +#define DM_DMSTATUS_ANYHALTED_OFFSET 8 +#define DM_DMSTATUS_ANYHALTED_LENGTH 1 +#define DM_DMSTATUS_ANYHALTED (0x1U << DM_DMSTATUS_ANYHALTED_OFFSET) +/* + * 0: Authentication is required before using the DM. + * + * 1: The authentication check has passed. + * + * On components that don't implement authentication, this bit must be + * preset as 1. + */ +#define DM_DMSTATUS_AUTHENTICATED_OFFSET 7 +#define DM_DMSTATUS_AUTHENTICATED_LENGTH 1 +#define DM_DMSTATUS_AUTHENTICATED (0x1U << DM_DMSTATUS_AUTHENTICATED_OFFSET) +/* + * 0: The authentication module is ready to process the next + * read/write to \RdmAuthdata. + * + * 1: The authentication module is busy. Accessing \RdmAuthdata results + * in unspecified behavior. + * + * \FdmDmstatusAuthbusy only becomes set in immediate response to an access to + * \RdmAuthdata. + */ +#define DM_DMSTATUS_AUTHBUSY_OFFSET 6 +#define DM_DMSTATUS_AUTHBUSY_LENGTH 1 +#define DM_DMSTATUS_AUTHBUSY (0x1U << DM_DMSTATUS_AUTHBUSY_OFFSET) +/* + * 1 if this Debug Module supports halt-on-reset functionality + * controllable by the \FdmDmcontrolSetresethaltreq and \FdmDmcontrolClrresethaltreq bits. + * 0 otherwise. + */ +#define DM_DMSTATUS_HASRESETHALTREQ_OFFSET 5 +#define DM_DMSTATUS_HASRESETHALTREQ_LENGTH 1 +#define DM_DMSTATUS_HASRESETHALTREQ (0x1U << DM_DMSTATUS_HASRESETHALTREQ_OFFSET) +/* + * 0: \RdmConfstrptrZero--\RdmConfstrptrThree hold information which + * is not relevant to the configuration string. + * + * 1: \RdmConfstrptrZero--\RdmConfstrptrThree hold the address of the + * configuration string. + */ +#define DM_DMSTATUS_CONFSTRPTRVALID_OFFSET 4 +#define DM_DMSTATUS_CONFSTRPTRVALID_LENGTH 1 +#define DM_DMSTATUS_CONFSTRPTRVALID (0x1U << DM_DMSTATUS_CONFSTRPTRVALID_OFFSET) +/* + * 0: There is no Debug Module present. + * + * 1: There is a Debug Module and it conforms to version 0.11 of this + * specification. 
+ * + * 2: There is a Debug Module and it conforms to version 0.13 of this + * specification. + * + * 3: There is a Debug Module and it conforms to version 1.0 of this + * specification. + * + * 15: There is a Debug Module but it does not conform to any + * available version of this spec. + */ +#define DM_DMSTATUS_VERSION_OFFSET 0 +#define DM_DMSTATUS_VERSION_LENGTH 4 +#define DM_DMSTATUS_VERSION (0xfU << DM_DMSTATUS_VERSION_OFFSET) +#define DM_DMCONTROL 0x10 +/* + * Writing 0 clears the halt request bit for all currently selected + * harts. This may cancel outstanding halt requests for those harts. + * + * Writing 1 sets the halt request bit for all currently selected + * harts. Running harts will halt whenever their halt request bit is + * set. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_HALTREQ_OFFSET 31 +#define DM_DMCONTROL_HALTREQ_LENGTH 1 +#define DM_DMCONTROL_HALTREQ (0x1U << DM_DMCONTROL_HALTREQ_OFFSET) +/* + * Writing 1 causes the currently selected harts to resume once, if + * they are halted when the write occurs. It also clears the resume + * ack bit for those harts. + * + * \FdmDmcontrolResumereq is ignored if \FdmDmcontrolHaltreq is set. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_RESUMEREQ_OFFSET 30 +#define DM_DMCONTROL_RESUMEREQ_LENGTH 1 +#define DM_DMCONTROL_RESUMEREQ (0x1U << DM_DMCONTROL_RESUMEREQ_OFFSET) +/* + * This optional field writes the reset bit for all the currently + * selected harts. To perform a reset the debugger writes 1, and then + * writes 0 to deassert the reset signal. + * + * While this bit is 1, the debugger must not change which harts are + * selected. + * + * If this feature is not implemented, the bit always stays 0, so + * after writing 1 the debugger can read the register back to see if + * the feature is supported. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_HARTRESET_OFFSET 29 +#define DM_DMCONTROL_HARTRESET_LENGTH 1 +#define DM_DMCONTROL_HARTRESET (0x1U << DM_DMCONTROL_HARTRESET_OFFSET) +/* + * 0: No effect. + * + * 1: Clears {\tt havereset} for any selected harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_ACKHAVERESET_OFFSET 28 +#define DM_DMCONTROL_ACKHAVERESET_LENGTH 1 +#define DM_DMCONTROL_ACKHAVERESET (0x1U << DM_DMCONTROL_ACKHAVERESET_OFFSET) +/* + * 0: No effect. + * + * 1: Clears {\tt unavail} for any selected harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_ACKUNAVAIL_OFFSET 27 +#define DM_DMCONTROL_ACKUNAVAIL_LENGTH 1 +#define DM_DMCONTROL_ACKUNAVAIL (0x1U << DM_DMCONTROL_ACKUNAVAIL_OFFSET) +/* + * Selects the definition of currently selected harts. + * + * 0: There is a single currently selected hart, that is selected by \Fhartsel. + * + * 1: There may be multiple currently selected harts -- the hart + * selected by \Fhartsel, plus those selected by the hart array mask + * register. + * + * An implementation which does not implement the hart array mask register + * must tie this field to 0. A debugger which wishes to use the hart array + * mask register feature should set this bit and read back to see if the functionality + * is supported. 
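+ *
+ * (Non-normative example of that probe: write \RdmDmcontrol with
+ * DM_DMCONTROL_HASEL set, read it back, and treat hart array mask
+ * selection as supported only if (dmcontrol & DM_DMCONTROL_HASEL) is
+ * still 1.)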
+ */ +#define DM_DMCONTROL_HASEL_OFFSET 26 +#define DM_DMCONTROL_HASEL_LENGTH 1 +#define DM_DMCONTROL_HASEL (0x1U << DM_DMCONTROL_HASEL_OFFSET) +/* + * The low 10 bits of \Fhartsel: the DM-specific index of the hart to + * select. This hart is always part of the currently selected harts. + */ +#define DM_DMCONTROL_HARTSELLO_OFFSET 16 +#define DM_DMCONTROL_HARTSELLO_LENGTH 10 +#define DM_DMCONTROL_HARTSELLO (0x3ffU << DM_DMCONTROL_HARTSELLO_OFFSET) +/* + * The high 10 bits of \Fhartsel: the DM-specific index of the hart to + * select. This hart is always part of the currently selected harts. + */ +#define DM_DMCONTROL_HARTSELHI_OFFSET 6 +#define DM_DMCONTROL_HARTSELHI_LENGTH 10 +#define DM_DMCONTROL_HARTSELHI (0x3ffU << DM_DMCONTROL_HARTSELHI_OFFSET) +/* + * This optional field sets \Fkeepalive for all currently selected + * harts, unless \FdmDmcontrolClrkeepalive is simultaneously set to + * 1. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_SETKEEPALIVE_OFFSET 5 +#define DM_DMCONTROL_SETKEEPALIVE_LENGTH 1 +#define DM_DMCONTROL_SETKEEPALIVE (0x1U << DM_DMCONTROL_SETKEEPALIVE_OFFSET) +/* + * This optional field clears \Fkeepalive for all currently selected + * harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_CLRKEEPALIVE_OFFSET 4 +#define DM_DMCONTROL_CLRKEEPALIVE_LENGTH 1 +#define DM_DMCONTROL_CLRKEEPALIVE (0x1U << DM_DMCONTROL_CLRKEEPALIVE_OFFSET) +/* + * This optional field writes the halt-on-reset request bit for all + * currently selected harts, unless \FdmDmcontrolClrresethaltreq is + * simultaneously set to 1. + * When set to 1, each selected hart will halt upon the next deassertion + * of its reset. The halt-on-reset request bit is not automatically + * cleared. The debugger must write to \FdmDmcontrolClrresethaltreq to clear it. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + * + * If \FdmDmstatusHasresethaltreq is 0, this field is not implemented. + */ +#define DM_DMCONTROL_SETRESETHALTREQ_OFFSET 3 +#define DM_DMCONTROL_SETRESETHALTREQ_LENGTH 1 +#define DM_DMCONTROL_SETRESETHALTREQ (0x1U << DM_DMCONTROL_SETRESETHALTREQ_OFFSET) +/* + * This optional field clears the halt-on-reset request bit for all + * currently selected harts. + * + * Writes apply to the new value of \Fhartsel and \FdmDmcontrolHasel. + */ +#define DM_DMCONTROL_CLRRESETHALTREQ_OFFSET 2 +#define DM_DMCONTROL_CLRRESETHALTREQ_LENGTH 1 +#define DM_DMCONTROL_CLRRESETHALTREQ (0x1U << DM_DMCONTROL_CLRRESETHALTREQ_OFFSET) +/* + * This bit controls the reset signal from the DM to the rest of the + * hardware platform. The signal should reset every part of the hardware platform, including + * every hart, except for the DM and any logic required to access the + * DM. + * To perform a hardware platform reset the debugger writes 1, + * and then writes 0 + * to deassert the reset. + */ +#define DM_DMCONTROL_NDMRESET_OFFSET 1 +#define DM_DMCONTROL_NDMRESET_LENGTH 1 +#define DM_DMCONTROL_NDMRESET (0x1U << DM_DMCONTROL_NDMRESET_OFFSET) +/* + * This bit serves as a reset signal for the Debug Module itself. + * After changing the value of this bit, the debugger must poll + * \RdmDmcontrol until \FdmDmcontrolDmactive has taken the requested value + * before performing any action that assumes the requested \FdmDmcontrolDmactive + * state change has completed. 
Hardware may + * take an arbitrarily long time to complete activation or deactivation and will + * indicate completion by setting \FdmDmcontrolDmactive to the requested value. + * + * 0: The module's state, including authentication mechanism, + * takes its reset values (the \FdmDmcontrolDmactive bit is the only bit which can + * be written to something other than its reset value). Any accesses + * to the module may fail. Specifically, \FdmDmstatusVersion might not return + * correct data. + * + * 1: The module functions normally. + * + * No other mechanism should exist that may result in resetting the + * Debug Module after power up. + * + * To place the Debug Module into a known state, a debugger may write 0 to \FdmDmcontrolDmactive, + * poll until \FdmDmcontrolDmactive is observed 0, write 1 to \FdmDmcontrolDmactive, and + * poll until \FdmDmcontrolDmactive is observed 1. + * + * Implementations may pay attention to this bit to further aid + * debugging, for example by preventing the Debug Module from being + * power gated while debugging is active. + */ +#define DM_DMCONTROL_DMACTIVE_OFFSET 0 +#define DM_DMCONTROL_DMACTIVE_LENGTH 1 +#define DM_DMCONTROL_DMACTIVE (0x1U << DM_DMCONTROL_DMACTIVE_OFFSET) +#define DM_HARTINFO 0x12 +/* + * Number of {\tt dscratch} registers available for the debugger + * to use during program buffer execution, starting from \RcsrDscratchZero. + * The debugger can make no assumptions about the contents of these + * registers between commands. + */ +#define DM_HARTINFO_NSCRATCH_OFFSET 20 +#define DM_HARTINFO_NSCRATCH_LENGTH 4 +#define DM_HARTINFO_NSCRATCH (0xfU << DM_HARTINFO_NSCRATCH_OFFSET) +/* + * 0: The {\tt data} registers are shadowed in the hart by CSRs. + * Each CSR is DXLEN bits in size, and corresponds + * to a single argument, per Table~\ref{tab:datareg}. + * + * 1: The {\tt data} registers are shadowed in the hart's memory map. + * Each register takes up 4 bytes in the memory map. + */ +#define DM_HARTINFO_DATAACCESS_OFFSET 16 +#define DM_HARTINFO_DATAACCESS_LENGTH 1 +#define DM_HARTINFO_DATAACCESS (0x1U << DM_HARTINFO_DATAACCESS_OFFSET) +/* + * If \FdmHartinfoDataaccess is 0: Number of CSRs dedicated to + * shadowing the {\tt data} registers. + * + * If \FdmHartinfoDataaccess is 1: Number of 32-bit words in the memory map + * dedicated to shadowing the {\tt data} registers. + * + * Since there are at most 12 {\tt data} registers, the value in this + * register must be 12 or smaller. + */ +#define DM_HARTINFO_DATASIZE_OFFSET 12 +#define DM_HARTINFO_DATASIZE_LENGTH 4 +#define DM_HARTINFO_DATASIZE (0xfU << DM_HARTINFO_DATASIZE_OFFSET) +/* + * If \FdmHartinfoDataaccess is 0: The number of the first CSR dedicated to + * shadowing the {\tt data} registers. + * + * If \FdmHartinfoDataaccess is 1: Address of RAM where the data + * registers are shadowed. This address is sign extended giving a + * range of -2048 to 2047, easily addressed with a load or store using + * \Xzero as the address register. + */ +#define DM_HARTINFO_DATAADDR_OFFSET 0 +#define DM_HARTINFO_DATAADDR_LENGTH 12 +#define DM_HARTINFO_DATAADDR (0xfffU << DM_HARTINFO_DATAADDR_OFFSET) +#define DM_HAWINDOWSEL 0x14 +/* + * The high bits of this field may be tied to 0, depending on how large + * the array mask register is. E.g.\ on a hardware platform with 48 harts only bit 0 + * of this field may actually be writable. 
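+ *
+ * (Each value of this register exposes a different 32-bit slice of the
+ * hart array mask through {\tt hawindow}; as a non-normative example,
+ * selecting hart N would mean writing N/32 here and then setting bit
+ * N%32 in {\tt hawindow}.)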
+ */ +#define DM_HAWINDOWSEL_HAWINDOWSEL_OFFSET 0 +#define DM_HAWINDOWSEL_HAWINDOWSEL_LENGTH 15 +#define DM_HAWINDOWSEL_HAWINDOWSEL (0x7fffU << DM_HAWINDOWSEL_HAWINDOWSEL_OFFSET) +#define DM_HAWINDOW 0x15 +#define DM_HAWINDOW_MASKDATA_OFFSET 0 +#define DM_HAWINDOW_MASKDATA_LENGTH 32 +#define DM_HAWINDOW_MASKDATA (0xffffffffU << DM_HAWINDOW_MASKDATA_OFFSET) +#define DM_ABSTRACTCS 0x16 +/* + * Size of the Program Buffer, in 32-bit words. Valid sizes are 0 - 16. + */ +#define DM_ABSTRACTCS_PROGBUFSIZE_OFFSET 24 +#define DM_ABSTRACTCS_PROGBUFSIZE_LENGTH 5 +#define DM_ABSTRACTCS_PROGBUFSIZE (0x1fU << DM_ABSTRACTCS_PROGBUFSIZE_OFFSET) +/* + * 1: An abstract command is currently being executed. + * + * This bit is set as soon as \RdmCommand is written, and is + * not cleared until that command has completed. + */ +#define DM_ABSTRACTCS_BUSY_OFFSET 12 +#define DM_ABSTRACTCS_BUSY_LENGTH 1 +#define DM_ABSTRACTCS_BUSY (0x1U << DM_ABSTRACTCS_BUSY_OFFSET) +/* + * This optional bit controls whether program buffer and abstract + * memory accesses are performed with the exact and full set of + * permission checks that apply based on the current architectural + * state of the hart performing the access, or with a relaxed set of + * permission checks (e.g. PMP restrictions are ignored). The + * details of the latter are implementation-specific. When set to 0, + * full permissions apply; when set to 1, relaxed permissions apply. + */ +#define DM_ABSTRACTCS_RELAXEDPRIV_OFFSET 11 +#define DM_ABSTRACTCS_RELAXEDPRIV_LENGTH 1 +#define DM_ABSTRACTCS_RELAXEDPRIV (0x1U << DM_ABSTRACTCS_RELAXEDPRIV_OFFSET) +/* + * Gets set if an abstract command fails. The bits in this field remain set until + * they are cleared by writing 1 to them. No abstract command is + * started until the value is reset to 0. + * + * This field only contains a valid value if \FdmAbstractcsBusy is 0. + * + * 0 (none): No error. + * + * 1 (busy): An abstract command was executing while \RdmCommand, + * \RdmAbstractcs, or \RdmAbstractauto was written, or when one + * of the {\tt data} or {\tt progbuf} registers was read or written. + * This status is only written if \FdmAbstractcsCmderr contains 0. + * + * 2 (not supported): The command in \RdmCommand is not supported. It + * may be supported with different options set, but it will not be + * supported at a later time when the hart or system state are + * different. + * + * 3 (exception): An exception occurred while executing the command + * (e.g.\ while executing the Program Buffer). + * + * 4 (halt/resume): The abstract command couldn't execute because the + * hart wasn't in the required state (running/halted), or unavailable. + * + * 5 (bus): The abstract command failed due to a bus error (e.g.\ + * alignment, access size, or timeout). + * + * 6: Reserved for future use. + * + * 7 (other): The command failed for another reason. + */ +#define DM_ABSTRACTCS_CMDERR_OFFSET 8 +#define DM_ABSTRACTCS_CMDERR_LENGTH 3 +#define DM_ABSTRACTCS_CMDERR (0x7U << DM_ABSTRACTCS_CMDERR_OFFSET) +/* + * Number of {\tt data} registers that are implemented as part of the + * abstract command interface. Valid sizes are 1 -- 12. + */ +#define DM_ABSTRACTCS_DATACOUNT_OFFSET 0 +#define DM_ABSTRACTCS_DATACOUNT_LENGTH 4 +#define DM_ABSTRACTCS_DATACOUNT (0xfU << DM_ABSTRACTCS_DATACOUNT_OFFSET) +#define DM_COMMAND 0x17 +/* + * The type determines the overall functionality of this + * abstract command. 
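+ *
+ * (Non-normative example: assuming cmdtype 0 denotes the Access Register
+ * command, such a command would be issued by writing
+ * ((0x0U << DM_COMMAND_CMDTYPE_OFFSET) | control) to \RdmCommand, with
+ * the register number, access size, and transfer flags encoded in the
+ * control field below.)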
+ */ +#define DM_COMMAND_CMDTYPE_OFFSET 24 +#define DM_COMMAND_CMDTYPE_LENGTH 8 +#define DM_COMMAND_CMDTYPE (0xffU << DM_COMMAND_CMDTYPE_OFFSET) +/* + * This field is interpreted in a command-specific manner, + * described for each abstract command. + */ +#define DM_COMMAND_CONTROL_OFFSET 0 +#define DM_COMMAND_CONTROL_LENGTH 24 +#define DM_COMMAND_CONTROL (0xffffffU << DM_COMMAND_CONTROL_OFFSET) +#define DM_ABSTRACTAUTO 0x18 +/* + * When a bit in this field is 1, read or write accesses to the + * corresponding {\tt progbuf} word cause the DM to act as if the + * current value in \RdmCommand was written there again after the + * access to {\tt progbuf} completes. + */ +#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET 16 +#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF_LENGTH 16 +#define DM_ABSTRACTAUTO_AUTOEXECPROGBUF (0xffffU << DM_ABSTRACTAUTO_AUTOEXECPROGBUF_OFFSET) +/* + * When a bit in this field is 1, read or write accesses to the + * corresponding {\tt data} word cause the DM to act as if the current + * value in \RdmCommand was written there again after the + * access to {\tt data} completes. + */ +#define DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET 0 +#define DM_ABSTRACTAUTO_AUTOEXECDATA_LENGTH 12 +#define DM_ABSTRACTAUTO_AUTOEXECDATA (0xfffU << DM_ABSTRACTAUTO_AUTOEXECDATA_OFFSET) +#define DM_CONFSTRPTR0 0x19 +#define DM_CONFSTRPTR0_ADDR_OFFSET 0 +#define DM_CONFSTRPTR0_ADDR_LENGTH 32 +#define DM_CONFSTRPTR0_ADDR (0xffffffffU << DM_CONFSTRPTR0_ADDR_OFFSET) +#define DM_CONFSTRPTR1 0x1a +#define DM_CONFSTRPTR1_ADDR_OFFSET 0 +#define DM_CONFSTRPTR1_ADDR_LENGTH 32 +#define DM_CONFSTRPTR1_ADDR (0xffffffffU << DM_CONFSTRPTR1_ADDR_OFFSET) +#define DM_CONFSTRPTR2 0x1b +#define DM_CONFSTRPTR2_ADDR_OFFSET 0 +#define DM_CONFSTRPTR2_ADDR_LENGTH 32 +#define DM_CONFSTRPTR2_ADDR (0xffffffffU << DM_CONFSTRPTR2_ADDR_OFFSET) +#define DM_CONFSTRPTR3 0x1c +#define DM_CONFSTRPTR3_ADDR_OFFSET 0 +#define DM_CONFSTRPTR3_ADDR_LENGTH 32 +#define DM_CONFSTRPTR3_ADDR (0xffffffffU << DM_CONFSTRPTR3_ADDR_OFFSET) +#define DM_NEXTDM 0x1d +#define DM_NEXTDM_ADDR_OFFSET 0 +#define DM_NEXTDM_ADDR_LENGTH 32 +#define DM_NEXTDM_ADDR (0xffffffffU << DM_NEXTDM_ADDR_OFFSET) +#define DM_DATA0 0x04 +#define DM_DATA0_DATA_OFFSET 0 +#define DM_DATA0_DATA_LENGTH 32 +#define DM_DATA0_DATA (0xffffffffU << DM_DATA0_DATA_OFFSET) +#define DM_DATA1 0x05 +#define DM_DATA2 0x06 +#define DM_DATA3 0x07 +#define DM_DATA4 0x08 +#define DM_DATA5 0x09 +#define DM_DATA6 0x0a +#define DM_DATA7 0x0b +#define DM_DATA8 0x0c +#define DM_DATA9 0x0d +#define DM_DATA10 0x0e +#define DM_DATA11 0x0f +#define DM_PROGBUF0 0x20 +#define DM_PROGBUF0_DATA_OFFSET 0 +#define DM_PROGBUF0_DATA_LENGTH 32 +#define DM_PROGBUF0_DATA (0xffffffffU << DM_PROGBUF0_DATA_OFFSET) +#define DM_PROGBUF1 0x21 +#define DM_PROGBUF2 0x22 +#define DM_PROGBUF3 0x23 +#define DM_PROGBUF4 0x24 +#define DM_PROGBUF5 0x25 +#define DM_PROGBUF6 0x26 +#define DM_PROGBUF7 0x27 +#define DM_PROGBUF8 0x28 +#define DM_PROGBUF9 0x29 +#define DM_PROGBUF10 0x2a +#define DM_PROGBUF11 0x2b +#define DM_PROGBUF12 0x2c +#define DM_PROGBUF13 0x2d +#define DM_PROGBUF14 0x2e +#define DM_PROGBUF15 0x2f +#define DM_AUTHDATA 0x30 +#define DM_AUTHDATA_DATA_OFFSET 0 +#define DM_AUTHDATA_DATA_LENGTH 32 +#define DM_AUTHDATA_DATA (0xffffffffU << DM_AUTHDATA_DATA_OFFSET) +#define DM_DMCS2 0x32 +/* + * 0: The remaining fields in this register configure halt groups. + * + * 1: The remaining fields in this register configure resume groups. 
+ */ +#define DM_DMCS2_GROUPTYPE_OFFSET 11 +#define DM_DMCS2_GROUPTYPE_LENGTH 1 +#define DM_DMCS2_GROUPTYPE (0x1U << DM_DMCS2_GROUPTYPE_OFFSET) +/* + * This field contains the currently selected DM external trigger. + * + * If a non-existent trigger value is written here, the hardware will + * change it to a valid one or 0 if no DM external triggers exist. + */ +#define DM_DMCS2_DMEXTTRIGGER_OFFSET 7 +#define DM_DMCS2_DMEXTTRIGGER_LENGTH 4 +#define DM_DMCS2_DMEXTTRIGGER (0xfU << DM_DMCS2_DMEXTTRIGGER_OFFSET) +/* + * When \FdmDmcsTwoHgselect is 0, contains the group of the hart + * specified by \Fhartsel. + * + * When \FdmDmcsTwoHgselect is 1, contains the group of the DM external + * trigger selected by \FdmDmcsTwoDmexttrigger. + * + * The value written to this field is ignored unless \FdmDmcsTwoHgwrite + * is also written 1. + * + * Group numbers are contiguous starting at 0, with the highest number + * being implementation-dependent, and possibly different between + * different group types. Debuggers should read back this field after + * writing to confirm they are using a hart group that is supported. + * + * If groups aren't implemented, then this entire field is 0. + */ +#define DM_DMCS2_GROUP_OFFSET 2 +#define DM_DMCS2_GROUP_LENGTH 5 +#define DM_DMCS2_GROUP (0x1fU << DM_DMCS2_GROUP_OFFSET) +/* + * When 1 is written and \FdmDmcsTwoHgselect is 0, for every selected + * hart the DM will change its group to the value written to \FdmDmcsTwoGroup, + * if the hardware supports that group for that hart. + * Implementations may also change the group of a minimal set of + * unselected harts in the same way, if that is necessary due to + * a hardware limitation. + * + * When 1 is written and \FdmDmcsTwoHgselect is 1, the DM will change + * the group of the DM external trigger selected by \FdmDmcsTwoDmexttrigger + * to the value written to \FdmDmcsTwoGroup, if the hardware supports + * that group for that trigger. + * + * Writing 0 has no effect. + */ +#define DM_DMCS2_HGWRITE_OFFSET 1 +#define DM_DMCS2_HGWRITE_LENGTH 1 +#define DM_DMCS2_HGWRITE (0x1U << DM_DMCS2_HGWRITE_OFFSET) +/* + * 0: Operate on harts. + * + * 1: Operate on DM external triggers. + * + * If there are no DM external triggers, this field must be tied to 0. + */ +#define DM_DMCS2_HGSELECT_OFFSET 0 +#define DM_DMCS2_HGSELECT_LENGTH 1 +#define DM_DMCS2_HGSELECT (0x1U << DM_DMCS2_HGSELECT_OFFSET) +#define DM_HALTSUM0 0x40 +#define DM_HALTSUM0_HALTSUM0_OFFSET 0 +#define DM_HALTSUM0_HALTSUM0_LENGTH 32 +#define DM_HALTSUM0_HALTSUM0 (0xffffffffU << DM_HALTSUM0_HALTSUM0_OFFSET) +#define DM_HALTSUM1 0x13 +#define DM_HALTSUM1_HALTSUM1_OFFSET 0 +#define DM_HALTSUM1_HALTSUM1_LENGTH 32 +#define DM_HALTSUM1_HALTSUM1 (0xffffffffU << DM_HALTSUM1_HALTSUM1_OFFSET) +#define DM_HALTSUM2 0x34 +#define DM_HALTSUM2_HALTSUM2_OFFSET 0 +#define DM_HALTSUM2_HALTSUM2_LENGTH 32 +#define DM_HALTSUM2_HALTSUM2 (0xffffffffU << DM_HALTSUM2_HALTSUM2_OFFSET) +#define DM_HALTSUM3 0x35 +#define DM_HALTSUM3_HALTSUM3_OFFSET 0 +#define DM_HALTSUM3_HALTSUM3_LENGTH 32 +#define DM_HALTSUM3_HALTSUM3 (0xffffffffU << DM_HALTSUM3_HALTSUM3_OFFSET) +#define DM_SBCS 0x38 +/* + * 0: The System Bus interface conforms to mainline drafts of this + * spec older than 1 January, 2018. + * + * 1: The System Bus interface conforms to this version of the spec. + * + * Other values are reserved for future versions. 
+ */ +#define DM_SBCS_SBVERSION_OFFSET 29 +#define DM_SBCS_SBVERSION_LENGTH 3 +#define DM_SBCS_SBVERSION (0x7U << DM_SBCS_SBVERSION_OFFSET) +/* + * Set when the debugger attempts to read data while a read is in + * progress, or when the debugger initiates a new access while one is + * already in progress (while \FdmSbcsSbbusy is set). It remains set until + * it's explicitly cleared by the debugger. + * + * While this field is set, no more system bus accesses can be + * initiated by the Debug Module. + */ +#define DM_SBCS_SBBUSYERROR_OFFSET 22 +#define DM_SBCS_SBBUSYERROR_LENGTH 1 +#define DM_SBCS_SBBUSYERROR (0x1U << DM_SBCS_SBBUSYERROR_OFFSET) +/* + * When 1, indicates the system bus master is busy. (Whether the + * system bus itself is busy is related, but not the same thing.) This + * bit goes high immediately when a read or write is requested for any + * reason, and does not go low until the access is fully completed. + * + * Writes to \RdmSbcs while \FdmSbcsSbbusy is high result in undefined + * behavior. A debugger must not write to \RdmSbcs until it reads + * \FdmSbcsSbbusy as 0. + */ +#define DM_SBCS_SBBUSY_OFFSET 21 +#define DM_SBCS_SBBUSY_LENGTH 1 +#define DM_SBCS_SBBUSY (0x1U << DM_SBCS_SBBUSY_OFFSET) +/* + * When 1, every write to \RdmSbaddressZero automatically triggers a + * system bus read at the new address. + */ +#define DM_SBCS_SBREADONADDR_OFFSET 20 +#define DM_SBCS_SBREADONADDR_LENGTH 1 +#define DM_SBCS_SBREADONADDR (0x1U << DM_SBCS_SBREADONADDR_OFFSET) +/* + * Select the access size to use for system bus accesses. + * + * 0: 8-bit + * + * 1: 16-bit + * + * 2: 32-bit + * + * 3: 64-bit + * + * 4: 128-bit + * + * If \FdmSbcsSbaccess has an unsupported value when the DM starts a bus + * access, the access is not performed and \FdmSbcsSberror is set to 4. + */ +#define DM_SBCS_SBACCESS_OFFSET 17 +#define DM_SBCS_SBACCESS_LENGTH 3 +#define DM_SBCS_SBACCESS (0x7U << DM_SBCS_SBACCESS_OFFSET) +/* + * When 1, {\tt sbaddress} is incremented by the access size (in + * bytes) selected in \FdmSbcsSbaccess after every system bus access. + */ +#define DM_SBCS_SBAUTOINCREMENT_OFFSET 16 +#define DM_SBCS_SBAUTOINCREMENT_LENGTH 1 +#define DM_SBCS_SBAUTOINCREMENT (0x1U << DM_SBCS_SBAUTOINCREMENT_OFFSET) +/* + * When 1, every read from \RdmSbdataZero automatically triggers a + * system bus read at the (possibly auto-incremented) address. + */ +#define DM_SBCS_SBREADONDATA_OFFSET 15 +#define DM_SBCS_SBREADONDATA_LENGTH 1 +#define DM_SBCS_SBREADONDATA (0x1U << DM_SBCS_SBREADONDATA_OFFSET) +/* + * When the Debug Module's system bus + * master encounters an error, this field gets set. The bits in this + * field remain set until they are cleared by writing 1 to them. + * While this field is non-zero, no more system bus accesses can be + * initiated by the Debug Module. + * + * An implementation may report ``Other'' (7) for any error condition. + * + * 0: There was no bus error. + * + * 1: There was a timeout. + * + * 2: A bad address was accessed. + * + * 3: There was an alignment error. + * + * 4: An access of unsupported size was requested. + * + * 7: Other. + */ +#define DM_SBCS_SBERROR_OFFSET 12 +#define DM_SBCS_SBERROR_LENGTH 3 +#define DM_SBCS_SBERROR (0x7U << DM_SBCS_SBERROR_OFFSET) +/* + * Width of system bus addresses in bits. (0 indicates there is no bus + * access support.) + */ +#define DM_SBCS_SBASIZE_OFFSET 5 +#define DM_SBCS_SBASIZE_LENGTH 7 +#define DM_SBCS_SBASIZE (0x7fU << DM_SBCS_SBASIZE_OFFSET) +/* + * 1 when 128-bit system bus accesses are supported. 
+ */ +#define DM_SBCS_SBACCESS128_OFFSET 4 +#define DM_SBCS_SBACCESS128_LENGTH 1 +#define DM_SBCS_SBACCESS128 (0x1U << DM_SBCS_SBACCESS128_OFFSET) +/* + * 1 when 64-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS64_OFFSET 3 +#define DM_SBCS_SBACCESS64_LENGTH 1 +#define DM_SBCS_SBACCESS64 (0x1U << DM_SBCS_SBACCESS64_OFFSET) +/* + * 1 when 32-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS32_OFFSET 2 +#define DM_SBCS_SBACCESS32_LENGTH 1 +#define DM_SBCS_SBACCESS32 (0x1U << DM_SBCS_SBACCESS32_OFFSET) +/* + * 1 when 16-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS16_OFFSET 1 +#define DM_SBCS_SBACCESS16_LENGTH 1 +#define DM_SBCS_SBACCESS16 (0x1U << DM_SBCS_SBACCESS16_OFFSET) +/* + * 1 when 8-bit system bus accesses are supported. + */ +#define DM_SBCS_SBACCESS8_OFFSET 0 +#define DM_SBCS_SBACCESS8_LENGTH 1 +#define DM_SBCS_SBACCESS8 (0x1U << DM_SBCS_SBACCESS8_OFFSET) +#define DM_SBADDRESS0 0x39 +/* + * Accesses bits 31:0 of the physical address in {\tt sbaddress}. + */ +#define DM_SBADDRESS0_ADDRESS_OFFSET 0 +#define DM_SBADDRESS0_ADDRESS_LENGTH 32 +#define DM_SBADDRESS0_ADDRESS (0xffffffffU << DM_SBADDRESS0_ADDRESS_OFFSET) +#define DM_SBADDRESS1 0x3a +/* + * Accesses bits 63:32 of the physical address in {\tt sbaddress} (if + * the system address bus is that wide). + */ +#define DM_SBADDRESS1_ADDRESS_OFFSET 0 +#define DM_SBADDRESS1_ADDRESS_LENGTH 32 +#define DM_SBADDRESS1_ADDRESS (0xffffffffU << DM_SBADDRESS1_ADDRESS_OFFSET) +#define DM_SBADDRESS2 0x3b +/* + * Accesses bits 95:64 of the physical address in {\tt sbaddress} (if + * the system address bus is that wide). + */ +#define DM_SBADDRESS2_ADDRESS_OFFSET 0 +#define DM_SBADDRESS2_ADDRESS_LENGTH 32 +#define DM_SBADDRESS2_ADDRESS (0xffffffffU << DM_SBADDRESS2_ADDRESS_OFFSET) +#define DM_SBADDRESS3 0x37 +/* + * Accesses bits 127:96 of the physical address in {\tt sbaddress} (if + * the system address bus is that wide). + */ +#define DM_SBADDRESS3_ADDRESS_OFFSET 0 +#define DM_SBADDRESS3_ADDRESS_LENGTH 32 +#define DM_SBADDRESS3_ADDRESS (0xffffffffU << DM_SBADDRESS3_ADDRESS_OFFSET) +#define DM_SBDATA0 0x3c +/* + * Accesses bits 31:0 of {\tt sbdata}. + */ +#define DM_SBDATA0_DATA_OFFSET 0 +#define DM_SBDATA0_DATA_LENGTH 32 +#define DM_SBDATA0_DATA (0xffffffffU << DM_SBDATA0_DATA_OFFSET) +#define DM_SBDATA1 0x3d +/* + * Accesses bits 63:32 of {\tt sbdata} (if the system bus is that + * wide). + */ +#define DM_SBDATA1_DATA_OFFSET 0 +#define DM_SBDATA1_DATA_LENGTH 32 +#define DM_SBDATA1_DATA (0xffffffffU << DM_SBDATA1_DATA_OFFSET) +#define DM_SBDATA2 0x3e +/* + * Accesses bits 95:64 of {\tt sbdata} (if the system bus is that + * wide). + */ +#define DM_SBDATA2_DATA_OFFSET 0 +#define DM_SBDATA2_DATA_LENGTH 32 +#define DM_SBDATA2_DATA (0xffffffffU << DM_SBDATA2_DATA_OFFSET) +#define DM_SBDATA3 0x3f +/* + * Accesses bits 127:96 of {\tt sbdata} (if the system bus is that + * wide). 
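+ *
+ * A minimal debugger-side sketch of a block read using the system bus
+ * registers above (dmi_write()/dmi_read(), start_address, nwords, buf and
+ * cs are hypothetical placeholders, not part of this header):
+ *
+ *   dmi_write(DM_SBCS, (2u << DM_SBCS_SBACCESS_OFFSET)   // 32-bit accesses
+ *                    | DM_SBCS_SBAUTOINCREMENT
+ *                    | DM_SBCS_SBREADONADDR
+ *                    | DM_SBCS_SBREADONDATA);
+ *   dmi_write(DM_SBADDRESS0, start_address);   // triggers the first bus read
+ *   for (unsigned i = 0; i < nwords; i++)
+ *     dmi_read(DM_SBDATA0, &buf[i]);           // each read fetches the next word
+ *   dmi_read(DM_SBCS, &cs);                    // then check DM_SBCS_SBERROR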
+ */ +#define DM_SBDATA3_DATA_OFFSET 0 +#define DM_SBDATA3_DATA_LENGTH 32 +#define DM_SBDATA3_DATA (0xffffffffU << DM_SBDATA3_DATA_OFFSET) +#define DM_CUSTOM 0x1f +#define DM_CUSTOM0 0x70 +#define DM_CUSTOM1 0x71 +#define DM_CUSTOM2 0x72 +#define DM_CUSTOM3 0x73 +#define DM_CUSTOM4 0x74 +#define DM_CUSTOM5 0x75 +#define DM_CUSTOM6 0x76 +#define DM_CUSTOM7 0x77 +#define DM_CUSTOM8 0x78 +#define DM_CUSTOM9 0x79 +#define DM_CUSTOM10 0x7a +#define DM_CUSTOM11 0x7b +#define DM_CUSTOM12 0x7c +#define DM_CUSTOM13 0x7d +#define DM_CUSTOM14 0x7e +#define DM_CUSTOM15 0x7f +#define SHORTNAME 0x123 +/* + * Description of what this field is used for. + */ +#define SHORTNAME_FIELD_OFFSET 0 +#define SHORTNAME_FIELD_LENGTH 8 +#define SHORTNAME_FIELD (0xffU << SHORTNAME_FIELD_OFFSET) +/* + * This is 0 to indicate Access Register Command. + */ +#define AC_ACCESS_REGISTER_CMDTYPE_OFFSET 24 +#define AC_ACCESS_REGISTER_CMDTYPE_LENGTH 8 +#define AC_ACCESS_REGISTER_CMDTYPE (0xffU << AC_ACCESS_REGISTER_CMDTYPE_OFFSET) +/* + * 2: Access the lowest 32 bits of the register. + * + * 3: Access the lowest 64 bits of the register. + * + * 4: Access the lowest 128 bits of the register. + * + * If \FacAccessregisterAarsize specifies a size larger than the register's actual size, + * then the access must fail. If a register is accessible, then reads of \FacAccessregisterAarsize + * less than or equal to the register's actual size must be supported. + * Writing less than the full register may be supported, but what + * happens to the high bits in that case is \unspecified. + * + * This field controls the Argument Width as referenced in + * Table~\ref{tab:datareg}. + */ +#define AC_ACCESS_REGISTER_AARSIZE_OFFSET 20 +#define AC_ACCESS_REGISTER_AARSIZE_LENGTH 3 +#define AC_ACCESS_REGISTER_AARSIZE (0x7U << AC_ACCESS_REGISTER_AARSIZE_OFFSET) +/* + * 0: No effect. This variant must be supported. + * + * 1: After a successful register access, \FacAccessregisterRegno is + * incremented. Incrementing past the highest supported value + * causes \FacAccessregisterRegno to become \unspecified. Supporting + * this variant is optional. It is undefined whether the increment + * happens when \FacAccessregisterTransfer is 0. + */ +#define AC_ACCESS_REGISTER_AARPOSTINCREMENT_OFFSET 19 +#define AC_ACCESS_REGISTER_AARPOSTINCREMENT_LENGTH 1 +#define AC_ACCESS_REGISTER_AARPOSTINCREMENT (0x1U << AC_ACCESS_REGISTER_AARPOSTINCREMENT_OFFSET) +/* + * 0: No effect. This variant must be supported, and is the only + * supported one if \FdmAbstractcsProgbufsize is 0. + * + * 1: Execute the program in the Program Buffer exactly once after + * performing the transfer, if any. Supporting this variant is + * optional. + */ +#define AC_ACCESS_REGISTER_POSTEXEC_OFFSET 18 +#define AC_ACCESS_REGISTER_POSTEXEC_LENGTH 1 +#define AC_ACCESS_REGISTER_POSTEXEC (0x1U << AC_ACCESS_REGISTER_POSTEXEC_OFFSET) +/* + * 0: Don't do the operation specified by \FacAccessregisterWrite. + * + * 1: Do the operation specified by \FacAccessregisterWrite. + * + * This bit can be used to just execute the Program Buffer without + * having to worry about placing valid values into \FacAccessregisterAarsize or \FacAccessregisterRegno. + */ +#define AC_ACCESS_REGISTER_TRANSFER_OFFSET 17 +#define AC_ACCESS_REGISTER_TRANSFER_LENGTH 1 +#define AC_ACCESS_REGISTER_TRANSFER (0x1U << AC_ACCESS_REGISTER_TRANSFER_OFFSET) +/* + * When \FacAccessregisterTransfer is set: + * 0: Copy data from the specified register into {\tt arg0} portion + * of {\tt data}. 
+ * + * 1: Copy data from {\tt arg0} portion of {\tt data} into the + * specified register. + */ +#define AC_ACCESS_REGISTER_WRITE_OFFSET 16 +#define AC_ACCESS_REGISTER_WRITE_LENGTH 1 +#define AC_ACCESS_REGISTER_WRITE (0x1U << AC_ACCESS_REGISTER_WRITE_OFFSET) +/* + * Number of the register to access, as described in + * Table~\ref{tab:regno}. + * \RcsrDpc may be used as an alias for PC if this command is + * supported on a non-halted hart. + */ +#define AC_ACCESS_REGISTER_REGNO_OFFSET 0 +#define AC_ACCESS_REGISTER_REGNO_LENGTH 16 +#define AC_ACCESS_REGISTER_REGNO (0xffffU << AC_ACCESS_REGISTER_REGNO_OFFSET) +/* + * This is 1 to indicate Quick Access command. + */ +#define AC_QUICK_ACCESS_CMDTYPE_OFFSET 24 +#define AC_QUICK_ACCESS_CMDTYPE_LENGTH 8 +#define AC_QUICK_ACCESS_CMDTYPE (0xffU << AC_QUICK_ACCESS_CMDTYPE_OFFSET) +/* + * This is 2 to indicate Access Memory Command. + */ +#define AC_ACCESS_MEMORY_CMDTYPE_OFFSET 24 +#define AC_ACCESS_MEMORY_CMDTYPE_LENGTH 8 +#define AC_ACCESS_MEMORY_CMDTYPE (0xffU << AC_ACCESS_MEMORY_CMDTYPE_OFFSET) +/* + * An implementation does not have to implement both virtual and + * physical accesses, but it must fail accesses that it doesn't + * support. + * + * 0: Addresses are physical (to the hart they are performed on). + * + * 1: Addresses are virtual, and translated the way they would be from + * M-mode, with \FcsrMstatusMprv set. + * + * Debug Modules on systems without address translation (i.e. virtual addresses equal physical) + * may optionally allow \FacAccessmemoryAamvirtual set to 1, which would produce the same result as + * that same abstract command with \FacAccessmemoryAamvirtual cleared. + */ +#define AC_ACCESS_MEMORY_AAMVIRTUAL_OFFSET 23 +#define AC_ACCESS_MEMORY_AAMVIRTUAL_LENGTH 1 +#define AC_ACCESS_MEMORY_AAMVIRTUAL (0x1U << AC_ACCESS_MEMORY_AAMVIRTUAL_OFFSET) +/* + * 0: Access the lowest 8 bits of the memory location. + * + * 1: Access the lowest 16 bits of the memory location. + * + * 2: Access the lowest 32 bits of the memory location. + * + * 3: Access the lowest 64 bits of the memory location. + * + * 4: Access the lowest 128 bits of the memory location. + */ +#define AC_ACCESS_MEMORY_AAMSIZE_OFFSET 20 +#define AC_ACCESS_MEMORY_AAMSIZE_LENGTH 3 +#define AC_ACCESS_MEMORY_AAMSIZE (0x7U << AC_ACCESS_MEMORY_AAMSIZE_OFFSET) +/* + * After a memory access has completed, if this bit is 1, increment + * {\tt arg1} (which contains the address used) by the number of bytes + * encoded in \FacAccessmemoryAamsize. + * + * Supporting this variant is optional, but highly recommended for + * performance reasons. + */ +#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT_OFFSET 19 +#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT_LENGTH 1 +#define AC_ACCESS_MEMORY_AAMPOSTINCREMENT (0x1U << AC_ACCESS_MEMORY_AAMPOSTINCREMENT_OFFSET) +/* + * 0: Copy data from the memory location specified in {\tt arg1} into + * the low bits of {\tt arg0}. Any remaining bits of {\tt arg0} now + * have an undefined value. + * + * 1: Copy data from the low bits of {\tt arg0} into the memory + * location specified in {\tt arg1}. + */ +#define AC_ACCESS_MEMORY_WRITE_OFFSET 16 +#define AC_ACCESS_MEMORY_WRITE_LENGTH 1 +#define AC_ACCESS_MEMORY_WRITE (0x1U << AC_ACCESS_MEMORY_WRITE_OFFSET) +/* + * These bits are reserved for target-specific uses. 
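+ *
+ * Putting the Access Memory fields together, a hypothetical debugger-side
+ * sketch for a single 32-bit store (cmdtype 2) might look as follows,
+ * assuming the usual argument layout of arg0 in data0 and arg1 in data1 for
+ * a 32-bit access; an implementation without Access Memory support reports
+ * cmderr 2 (not supported) instead of executing it:
+ *
+ *   dmi_write(DM_DATA0, value);      // arg0: data to store
+ *   dmi_write(DM_DATA1, address);    // arg1: target address
+ *   dmi_write(DM_COMMAND, (2u << AC_ACCESS_MEMORY_CMDTYPE_OFFSET)
+ *                       | (2u << AC_ACCESS_MEMORY_AAMSIZE_OFFSET)  // 32-bit
+ *                       | AC_ACCESS_MEMORY_WRITE);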
+ */ +#define AC_ACCESS_MEMORY_TARGET_SPECIFIC_OFFSET 14 +#define AC_ACCESS_MEMORY_TARGET_SPECIFIC_LENGTH 2 +#define AC_ACCESS_MEMORY_TARGET_SPECIFIC (0x3U << AC_ACCESS_MEMORY_TARGET_SPECIFIC_OFFSET) +#define VIRT_PRIV virtual +/* + * Contains the virtualization mode the hart was operating in when Debug + * Mode was entered. The encoding is described in Table \ref{tab:privmode}, + * and matches the virtualization mode encoding from the Privileged Spec. + * A user can write this value to change the hart's virtualization mode + * when exiting Debug Mode. + */ +#define VIRT_PRIV_V_OFFSET 2 +#define VIRT_PRIV_V_LENGTH 1 +#define VIRT_PRIV_V (0x1U << VIRT_PRIV_V_OFFSET) +/* + * Contains the privilege mode the hart was operating in when Debug + * Mode was entered. The encoding is described in Table + * \ref{tab:privmode}, and matches the privilege mode encoding from + * the Privileged Spec. A user can write this + * value to change the hart's privilege mode when exiting Debug Mode. + */ +#define VIRT_PRIV_PRV_OFFSET 0 +#define VIRT_PRIV_PRV_LENGTH 2 +#define VIRT_PRIV_PRV (0x3U << VIRT_PRIV_PRV_OFFSET) +#define DMI_SERCS 0x34 +/* + * Number of supported serial ports. + */ +#define DMI_SERCS_SERIALCOUNT_OFFSET 28 +#define DMI_SERCS_SERIALCOUNT_LENGTH 4 +#define DMI_SERCS_SERIALCOUNT (0xfU << DMI_SERCS_SERIALCOUNT_OFFSET) +/* + * Select which serial port is accessed by \RdmiSerrx and \RdmiSertx. + */ +#define DMI_SERCS_SERIAL_OFFSET 24 +#define DMI_SERCS_SERIAL_LENGTH 3 +#define DMI_SERCS_SERIAL (0x7U << DMI_SERCS_SERIAL_OFFSET) +#define DMI_SERCS_ERROR7_OFFSET 23 +#define DMI_SERCS_ERROR7_LENGTH 1 +#define DMI_SERCS_ERROR7 (0x1U << DMI_SERCS_ERROR7_OFFSET) +#define DMI_SERCS_VALID7_OFFSET 22 +#define DMI_SERCS_VALID7_LENGTH 1 +#define DMI_SERCS_VALID7 (0x1U << DMI_SERCS_VALID7_OFFSET) +#define DMI_SERCS_FULL7_OFFSET 21 +#define DMI_SERCS_FULL7_LENGTH 1 +#define DMI_SERCS_FULL7 (0x1U << DMI_SERCS_FULL7_OFFSET) +#define DMI_SERCS_ERROR6_OFFSET 20 +#define DMI_SERCS_ERROR6_LENGTH 1 +#define DMI_SERCS_ERROR6 (0x1U << DMI_SERCS_ERROR6_OFFSET) +#define DMI_SERCS_VALID6_OFFSET 19 +#define DMI_SERCS_VALID6_LENGTH 1 +#define DMI_SERCS_VALID6 (0x1U << DMI_SERCS_VALID6_OFFSET) +#define DMI_SERCS_FULL6_OFFSET 18 +#define DMI_SERCS_FULL6_LENGTH 1 +#define DMI_SERCS_FULL6 (0x1U << DMI_SERCS_FULL6_OFFSET) +#define DMI_SERCS_ERROR5_OFFSET 17 +#define DMI_SERCS_ERROR5_LENGTH 1 +#define DMI_SERCS_ERROR5 (0x1U << DMI_SERCS_ERROR5_OFFSET) +#define DMI_SERCS_VALID5_OFFSET 16 +#define DMI_SERCS_VALID5_LENGTH 1 +#define DMI_SERCS_VALID5 (0x1U << DMI_SERCS_VALID5_OFFSET) +#define DMI_SERCS_FULL5_OFFSET 15 +#define DMI_SERCS_FULL5_LENGTH 1 +#define DMI_SERCS_FULL5 (0x1U << DMI_SERCS_FULL5_OFFSET) +#define DMI_SERCS_ERROR4_OFFSET 14 +#define DMI_SERCS_ERROR4_LENGTH 1 +#define DMI_SERCS_ERROR4 (0x1U << DMI_SERCS_ERROR4_OFFSET) +#define DMI_SERCS_VALID4_OFFSET 13 +#define DMI_SERCS_VALID4_LENGTH 1 +#define DMI_SERCS_VALID4 (0x1U << DMI_SERCS_VALID4_OFFSET) +#define DMI_SERCS_FULL4_OFFSET 12 +#define DMI_SERCS_FULL4_LENGTH 1 +#define DMI_SERCS_FULL4 (0x1U << DMI_SERCS_FULL4_OFFSET) +#define DMI_SERCS_ERROR3_OFFSET 11 +#define DMI_SERCS_ERROR3_LENGTH 1 +#define DMI_SERCS_ERROR3 (0x1U << DMI_SERCS_ERROR3_OFFSET) +#define DMI_SERCS_VALID3_OFFSET 10 +#define DMI_SERCS_VALID3_LENGTH 1 +#define DMI_SERCS_VALID3 (0x1U << DMI_SERCS_VALID3_OFFSET) +#define DMI_SERCS_FULL3_OFFSET 9 +#define DMI_SERCS_FULL3_LENGTH 1 +#define DMI_SERCS_FULL3 (0x1U << DMI_SERCS_FULL3_OFFSET) +#define DMI_SERCS_ERROR2_OFFSET 8 +#define 
DMI_SERCS_ERROR2_LENGTH 1 +#define DMI_SERCS_ERROR2 (0x1U << DMI_SERCS_ERROR2_OFFSET) +#define DMI_SERCS_VALID2_OFFSET 7 +#define DMI_SERCS_VALID2_LENGTH 1 +#define DMI_SERCS_VALID2 (0x1U << DMI_SERCS_VALID2_OFFSET) +#define DMI_SERCS_FULL2_OFFSET 6 +#define DMI_SERCS_FULL2_LENGTH 1 +#define DMI_SERCS_FULL2 (0x1U << DMI_SERCS_FULL2_OFFSET) +#define DMI_SERCS_ERROR1_OFFSET 5 +#define DMI_SERCS_ERROR1_LENGTH 1 +#define DMI_SERCS_ERROR1 (0x1U << DMI_SERCS_ERROR1_OFFSET) +#define DMI_SERCS_VALID1_OFFSET 4 +#define DMI_SERCS_VALID1_LENGTH 1 +#define DMI_SERCS_VALID1 (0x1U << DMI_SERCS_VALID1_OFFSET) +#define DMI_SERCS_FULL1_OFFSET 3 +#define DMI_SERCS_FULL1_LENGTH 1 +#define DMI_SERCS_FULL1 (0x1U << DMI_SERCS_FULL1_OFFSET) +/* + * 1 when the debugger-to-core queue for serial port 0 has + * over or underflowed. This bit will remain set until it is reset by + * writing 1 to this bit. + */ +#define DMI_SERCS_ERROR0_OFFSET 2 +#define DMI_SERCS_ERROR0_LENGTH 1 +#define DMI_SERCS_ERROR0 (0x1U << DMI_SERCS_ERROR0_OFFSET) +/* + * 1 when the core-to-debugger queue for serial port 0 is not empty. + */ +#define DMI_SERCS_VALID0_OFFSET 1 +#define DMI_SERCS_VALID0_LENGTH 1 +#define DMI_SERCS_VALID0 (0x1U << DMI_SERCS_VALID0_OFFSET) +/* + * 1 when the debugger-to-core queue for serial port 0 is full. + */ +#define DMI_SERCS_FULL0_OFFSET 0 +#define DMI_SERCS_FULL0_LENGTH 1 +#define DMI_SERCS_FULL0 (0x1U << DMI_SERCS_FULL0_OFFSET) +#define DMI_SERTX 0x35 +#define DMI_SERTX_DATA_OFFSET 0 +#define DMI_SERTX_DATA_LENGTH 32 +#define DMI_SERTX_DATA (0xffffffffU << DMI_SERTX_DATA_OFFSET) +#define DMI_SERRX 0x36 +#define DMI_SERRX_DATA_OFFSET 0 +#define DMI_SERRX_DATA_LENGTH 32 +#define DMI_SERRX_DATA (0xffffffffU << DMI_SERRX_DATA_OFFSET) diff --git a/vendor/riscv-isa-sim/riscv/debug_module.cc b/vendor/riscv-isa-sim/riscv/debug_module.cc new file mode 100644 index 00000000..0eac8424 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/debug_module.cc @@ -0,0 +1,938 @@ +#include + +#include "sim.h" +#include "debug_module.h" +#include "debug_defines.h" +#include "opcodes.h" +#include "mmu.h" + +#include "debug_rom/debug_rom.h" +#include "debug_rom_defines.h" + +#if 0 +# define D(x) x +#else +# define D(x) +#endif + +// Return the number of bits wide that a field has to be to encode up to n +// different values. +// 1->0, 2->1, 3->2, 4->2 +static unsigned field_width(unsigned n) +{ + unsigned i = 0; + n -= 1; + while (n) { + i++; + n >>= 1; + } + return i; +} + +///////////////////////// debug_module_t + +debug_module_t::debug_module_t(sim_t *sim, const debug_module_config_t &config) : + nprocs(sim->nprocs()), + config(config), + program_buffer_bytes((config.support_impebreak ? 4 : 0) + 4*config.progbufsize), + debug_progbuf_start(debug_data_start - program_buffer_bytes), + debug_abstract_start(debug_progbuf_start - debug_abstract_size*4), + custom_base(0), + hartsellen(field_width(sim->nprocs())), + sim(sim), + // The spec lets a debugger select nonexistent harts. Create hart_state for + // them because I'm too lazy to add the code to just ignore accesses. 
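+ // (Worked example: with 6 harts, field_width(6) == 3, so hart_state has
+ // 2^3 == 8 entries and selecting nonexistent hart 6 or 7 just hits dummy state.)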
+ hart_state(1 << field_width(sim->nprocs())), + hart_array_mask(sim->nprocs()), + rti_remaining(0) +{ + D(fprintf(stderr, "debug_data_start=0x%x\n", debug_data_start)); + D(fprintf(stderr, "debug_progbuf_start=0x%x\n", debug_progbuf_start)); + D(fprintf(stderr, "debug_abstract_start=0x%x\n", debug_abstract_start)); + + assert(nprocs <= 1024); + + program_buffer = new uint8_t[program_buffer_bytes]; + + memset(debug_rom_flags, 0, sizeof(debug_rom_flags)); + memset(program_buffer, 0, program_buffer_bytes); + memset(dmdata, 0, sizeof(dmdata)); + + if (config.support_impebreak) { + program_buffer[4*config.progbufsize] = ebreak(); + program_buffer[4*config.progbufsize+1] = ebreak() >> 8; + program_buffer[4*config.progbufsize+2] = ebreak() >> 16; + program_buffer[4*config.progbufsize+3] = ebreak() >> 24; + } + + write32(debug_rom_whereto, 0, + jal(ZERO, debug_abstract_start - DEBUG_ROM_WHERETO)); + + memset(debug_abstract, 0, sizeof(debug_abstract)); + + reset(); +} + +debug_module_t::~debug_module_t() +{ + delete[] program_buffer; +} + +void debug_module_t::reset() +{ + assert(sim->nprocs() > 0); + for (unsigned i = 0; i < sim->nprocs(); i++) { + processor_t *proc = sim->get_core(i); + if (proc) + proc->halt_request = proc->HR_NONE; + } + + memset(&dmcontrol, 0, sizeof(dmcontrol)); + + memset(&dmstatus, 0, sizeof(dmstatus)); + dmstatus.impebreak = config.support_impebreak; + dmstatus.authenticated = !config.require_authentication; + dmstatus.version = 2; + + memset(&abstractcs, 0, sizeof(abstractcs)); + abstractcs.datacount = sizeof(dmdata) / 4; + abstractcs.progbufsize = config.progbufsize; + + memset(&abstractauto, 0, sizeof(abstractauto)); + + memset(&sbcs, 0, sizeof(sbcs)); + if (config.max_sba_data_width > 0) { + sbcs.version = 1; + sbcs.asize = sizeof(reg_t) * 8; + } + if (config.max_sba_data_width >= 64) + sbcs.access64 = true; + if (config.max_sba_data_width >= 32) + sbcs.access32 = true; + if (config.max_sba_data_width >= 16) + sbcs.access16 = true; + if (config.max_sba_data_width >= 8) + sbcs.access8 = true; + + challenge = random(); +} + +void debug_module_t::add_device(bus_t *bus) { + bus->add_device(DEBUG_START, this); +} + +bool debug_module_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + addr = DEBUG_START + addr; + + if (addr >= DEBUG_ROM_ENTRY && + (addr + len) <= (DEBUG_ROM_ENTRY + debug_rom_raw_len)) { + memcpy(bytes, debug_rom_raw + addr - DEBUG_ROM_ENTRY, len); + return true; + } + + if (addr >= DEBUG_ROM_WHERETO && (addr + len) <= (DEBUG_ROM_WHERETO + 4)) { + memcpy(bytes, debug_rom_whereto + addr - DEBUG_ROM_WHERETO, len); + return true; + } + + if (addr >= DEBUG_ROM_FLAGS && ((addr + len) <= DEBUG_ROM_FLAGS + 1024)) { + memcpy(bytes, debug_rom_flags + addr - DEBUG_ROM_FLAGS, len); + return true; + } + + if (addr >= debug_abstract_start && ((addr + len) <= (debug_abstract_start + sizeof(debug_abstract)))) { + memcpy(bytes, debug_abstract + addr - debug_abstract_start, len); + return true; + } + + if (addr >= debug_data_start && (addr + len) <= (debug_data_start + sizeof(dmdata))) { + memcpy(bytes, dmdata + addr - debug_data_start, len); + return true; + } + + if (addr >= debug_progbuf_start && ((addr + len) <= (debug_progbuf_start + program_buffer_bytes))) { + memcpy(bytes, program_buffer + addr - debug_progbuf_start, len); + return true; + } + + D(fprintf(stderr, "ERROR: invalid load from debug module: %zd bytes at 0x%016" + PRIx64 "\n", len, addr)); + + return false; +} + +bool debug_module_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + D( + switch 
(len) { + case 4: + fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=0x%08x); " + "hartsel=0x%x\n", addr, (unsigned) len, *(uint32_t *) bytes, + dmcontrol.hartsel); + break; + default: + fprintf(stderr, "store(addr=0x%lx, len=%d, bytes=...); " + "hartsel=0x%x\n", addr, (unsigned) len, dmcontrol.hartsel); + break; + } + ); + + uint8_t id_bytes[4]; + uint32_t id = 0; + if (len == 4) { + memcpy(id_bytes, bytes, 4); + id = read32(id_bytes, 0); + } + + addr = DEBUG_START + addr; + + if (addr >= debug_data_start && (addr + len) <= (debug_data_start + sizeof(dmdata))) { + memcpy(dmdata + addr - debug_data_start, bytes, len); + return true; + } + + if (addr >= debug_progbuf_start && ((addr + len) <= (debug_progbuf_start + program_buffer_bytes))) { + memcpy(program_buffer + addr - debug_progbuf_start, bytes, len); + + return true; + } + + if (addr == DEBUG_ROM_HALTED) { + assert (len == 4); + if (!hart_state[id].halted) { + hart_state[id].halted = true; + if (hart_state[id].haltgroup) { + for (unsigned i = 0; i < nprocs; i++) { + if (!hart_state[i].halted && + hart_state[i].haltgroup == hart_state[id].haltgroup) { + processor_t *proc = sim->get_core(i); + proc->halt_request = proc->HR_GROUP; + // TODO: What if the debugger comes and writes dmcontrol before the + // halt occurs? + } + } + } + } + if (dmcontrol.hartsel == id) { + if (0 == (debug_rom_flags[id] & (1 << DEBUG_ROM_FLAG_GO))){ + if (dmcontrol.hartsel == id) { + abstract_command_completed = true; + } + } + } + return true; + } + + if (addr == DEBUG_ROM_GOING) { + assert(len == 4); + debug_rom_flags[id] &= ~(1 << DEBUG_ROM_FLAG_GO); + return true; + } + + if (addr == DEBUG_ROM_RESUMING) { + assert (len == 4); + hart_state[id].halted = false; + hart_state[id].resumeack = true; + debug_rom_flags[id] &= ~(1 << DEBUG_ROM_FLAG_RESUME); + return true; + } + + if (addr == DEBUG_ROM_EXCEPTION) { + if (abstractcs.cmderr == CMDERR_NONE) { + abstractcs.cmderr = CMDERR_EXCEPTION; + } + return true; + } + + D(fprintf(stderr, "ERROR: invalid store to debug module: %zd bytes at 0x%016" + PRIx64 "\n", len, addr)); + return false; +} + +void debug_module_t::write32(uint8_t *memory, unsigned int index, uint32_t value) +{ + uint8_t* base = memory + index * 4; + base[0] = value & 0xff; + base[1] = (value >> 8) & 0xff; + base[2] = (value >> 16) & 0xff; + base[3] = (value >> 24) & 0xff; +} + +uint32_t debug_module_t::read32(uint8_t *memory, unsigned int index) +{ + uint8_t* base = memory + index * 4; + uint32_t value = ((uint32_t) base[0]) | + (((uint32_t) base[1]) << 8) | + (((uint32_t) base[2]) << 16) | + (((uint32_t) base[3]) << 24); + return value; +} + +processor_t *debug_module_t::processor(unsigned hartid) const +{ + processor_t *proc = NULL; + try { + proc = sim->get_core(hartid); + } catch (const std::out_of_range&) { + } + return proc; +} + +bool debug_module_t::hart_selected(unsigned hartid) const +{ + if (dmcontrol.hasel) { + return hartid == dmcontrol.hartsel || hart_array_mask[hartid]; + } else { + return hartid == dmcontrol.hartsel; + } +} + +unsigned debug_module_t::sb_access_bits() +{ + return 8 << sbcs.sbaccess; +} + +void debug_module_t::sb_autoincrement() +{ + if (!sbcs.autoincrement || !config.max_sba_data_width) + return; + + uint64_t value = sbaddress[0] + sb_access_bits() / 8; + sbaddress[0] = value; + uint32_t carry = value >> 32; + + value = sbaddress[1] + carry; + sbaddress[1] = value; + carry = value >> 32; + + value = sbaddress[2] + carry; + sbaddress[2] = value; + carry = value >> 32; + + sbaddress[3] += carry; +} + +void 
debug_module_t::sb_read() +{ + reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0]; + try { + if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) { + sbdata[0] = sim->debug_mmu->load_uint8(address); + } else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) { + sbdata[0] = sim->debug_mmu->load_uint16(address); + } else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) { + sbdata[0] = sim->debug_mmu->load_uint32(address); + } else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) { + uint64_t value = sim->debug_mmu->load_uint64(address); + sbdata[0] = value; + sbdata[1] = value >> 32; + } else { + sbcs.error = 3; + } + } catch (trap_load_access_fault& t) { + sbcs.error = 2; + } +} + +void debug_module_t::sb_write() +{ + reg_t address = ((uint64_t) sbaddress[1] << 32) | sbaddress[0]; + D(fprintf(stderr, "sb_write() 0x%x @ 0x%lx\n", sbdata[0], address)); + if (sbcs.sbaccess == 0 && config.max_sba_data_width >= 8) { + sim->debug_mmu->store_uint8(address, sbdata[0]); + } else if (sbcs.sbaccess == 1 && config.max_sba_data_width >= 16) { + sim->debug_mmu->store_uint16(address, sbdata[0]); + } else if (sbcs.sbaccess == 2 && config.max_sba_data_width >= 32) { + sim->debug_mmu->store_uint32(address, sbdata[0]); + } else if (sbcs.sbaccess == 3 && config.max_sba_data_width >= 64) { + sim->debug_mmu->store_uint64(address, + (((uint64_t) sbdata[1]) << 32) | sbdata[0]); + } else { + sbcs.error = 3; + } +} + +bool debug_module_t::dmi_read(unsigned address, uint32_t *value) +{ + uint32_t result = 0; + D(fprintf(stderr, "dmi_read(0x%x) -> ", address)); + if (address >= DM_DATA0 && address < DM_DATA0 + abstractcs.datacount) { + unsigned i = address - DM_DATA0; + result = read32(dmdata, i); + if (abstractcs.busy) { + result = -1; + D(fprintf(stderr, "\ndmi_read(0x%02x (data[%d]) -> -1 because abstractcs.busy==true\n", address, i)); + } + + if (abstractcs.busy && abstractcs.cmderr == CMDERR_NONE) { + abstractcs.cmderr = CMDERR_BUSY; + } + + if (!abstractcs.busy && ((abstractauto.autoexecdata >> i) & 1)) { + perform_abstract_command(); + } + } else if (address >= DM_PROGBUF0 && address < DM_PROGBUF0 + config.progbufsize) { + unsigned i = address - DM_PROGBUF0; + result = read32(program_buffer, i); + if (abstractcs.busy) { + result = -1; + D(fprintf(stderr, "\ndmi_read(0x%02x (progbuf[%d]) -> -1 because abstractcs.busy==true\n", address, i)); + } + if (!abstractcs.busy && ((abstractauto.autoexecprogbuf >> i) & 1)) { + perform_abstract_command(); + } + + } else { + switch (address) { + case DM_DMCONTROL: + { + result = set_field(result, DM_DMCONTROL_HALTREQ, dmcontrol.haltreq); + result = set_field(result, DM_DMCONTROL_RESUMEREQ, dmcontrol.resumereq); + result = set_field(result, DM_DMCONTROL_HARTSELHI, + dmcontrol.hartsel >> DM_DMCONTROL_HARTSELLO_LENGTH); + result = set_field(result, DM_DMCONTROL_HASEL, dmcontrol.hasel); + result = set_field(result, DM_DMCONTROL_HARTSELLO, dmcontrol.hartsel); + result = set_field(result, DM_DMCONTROL_HARTRESET, dmcontrol.hartreset); + result = set_field(result, DM_DMCONTROL_NDMRESET, dmcontrol.ndmreset); + result = set_field(result, DM_DMCONTROL_DMACTIVE, dmcontrol.dmactive); + } + break; + case DM_DMSTATUS: + { + dmstatus.allhalted = true; + dmstatus.anyhalted = false; + dmstatus.allrunning = true; + dmstatus.anyrunning = false; + dmstatus.allnonexistant = true; + dmstatus.allresumeack = true; + dmstatus.anyresumeack = false; + for (unsigned i = 0; i < nprocs; i++) { + if (hart_selected(i)) { + dmstatus.allnonexistant = false; + if 
(hart_state[i].resumeack) { + dmstatus.anyresumeack = true; + } else { + dmstatus.allresumeack = false; + } + if (hart_state[i].halted) { + dmstatus.allrunning = false; + dmstatus.anyhalted = true; + } else { + dmstatus.allhalted = false; + dmstatus.anyrunning = true; + } + } + } + + // We don't allow selecting non-existant harts through + // hart_array_mask, so the only way it's possible is by writing a + // non-existant hartsel. + dmstatus.anynonexistant = (dmcontrol.hartsel >= nprocs); + + dmstatus.allunavail = false; + dmstatus.anyunavail = false; + + result = set_field(result, DM_DMSTATUS_IMPEBREAK, + dmstatus.impebreak); + result = set_field(result, DM_DMSTATUS_ALLHAVERESET, + hart_state[dmcontrol.hartsel].havereset); + result = set_field(result, DM_DMSTATUS_ANYHAVERESET, + hart_state[dmcontrol.hartsel].havereset); + result = set_field(result, DM_DMSTATUS_ALLNONEXISTENT, dmstatus.allnonexistant); + result = set_field(result, DM_DMSTATUS_ALLUNAVAIL, dmstatus.allunavail); + result = set_field(result, DM_DMSTATUS_ALLRUNNING, dmstatus.allrunning); + result = set_field(result, DM_DMSTATUS_ALLHALTED, dmstatus.allhalted); + result = set_field(result, DM_DMSTATUS_ALLRESUMEACK, dmstatus.allresumeack); + result = set_field(result, DM_DMSTATUS_ANYNONEXISTENT, dmstatus.anynonexistant); + result = set_field(result, DM_DMSTATUS_ANYUNAVAIL, dmstatus.anyunavail); + result = set_field(result, DM_DMSTATUS_ANYRUNNING, dmstatus.anyrunning); + result = set_field(result, DM_DMSTATUS_ANYHALTED, dmstatus.anyhalted); + result = set_field(result, DM_DMSTATUS_ANYRESUMEACK, dmstatus.anyresumeack); + result = set_field(result, DM_DMSTATUS_AUTHENTICATED, dmstatus.authenticated); + result = set_field(result, DM_DMSTATUS_AUTHBUSY, dmstatus.authbusy); + result = set_field(result, DM_DMSTATUS_VERSION, dmstatus.version); + } + break; + case DM_ABSTRACTCS: + result = set_field(result, DM_ABSTRACTCS_CMDERR, abstractcs.cmderr); + result = set_field(result, DM_ABSTRACTCS_BUSY, abstractcs.busy); + result = set_field(result, DM_ABSTRACTCS_DATACOUNT, abstractcs.datacount); + result = set_field(result, DM_ABSTRACTCS_PROGBUFSIZE, + abstractcs.progbufsize); + break; + case DM_ABSTRACTAUTO: + result = set_field(result, DM_ABSTRACTAUTO_AUTOEXECPROGBUF, abstractauto.autoexecprogbuf); + result = set_field(result, DM_ABSTRACTAUTO_AUTOEXECDATA, abstractauto.autoexecdata); + break; + case DM_COMMAND: + result = 0; + break; + case DM_HARTINFO: + result = set_field(result, DM_HARTINFO_NSCRATCH, 1); + result = set_field(result, DM_HARTINFO_DATAACCESS, 1); + result = set_field(result, DM_HARTINFO_DATASIZE, abstractcs.datacount); + result = set_field(result, DM_HARTINFO_DATAADDR, debug_data_start); + break; + case DM_HAWINDOWSEL: + result = hawindowsel; + break; + case DM_HAWINDOW: + { + unsigned base = hawindowsel * 32; + for (unsigned i = 0; i < 32; i++) { + unsigned n = base + i; + if (n < nprocs && hart_array_mask[n]) { + result |= 1 << i; + } + } + } + break; + case DM_SBCS: + result = set_field(result, DM_SBCS_SBVERSION, sbcs.version); + result = set_field(result, DM_SBCS_SBREADONADDR, sbcs.readonaddr); + result = set_field(result, DM_SBCS_SBACCESS, sbcs.sbaccess); + result = set_field(result, DM_SBCS_SBAUTOINCREMENT, sbcs.autoincrement); + result = set_field(result, DM_SBCS_SBREADONDATA, sbcs.readondata); + result = set_field(result, DM_SBCS_SBERROR, sbcs.error); + result = set_field(result, DM_SBCS_SBASIZE, sbcs.asize); + result = set_field(result, DM_SBCS_SBACCESS128, sbcs.access128); + result = set_field(result, 
DM_SBCS_SBACCESS64, sbcs.access64); + result = set_field(result, DM_SBCS_SBACCESS32, sbcs.access32); + result = set_field(result, DM_SBCS_SBACCESS16, sbcs.access16); + result = set_field(result, DM_SBCS_SBACCESS8, sbcs.access8); + break; + case DM_SBADDRESS0: + result = sbaddress[0]; + break; + case DM_SBADDRESS1: + result = sbaddress[1]; + break; + case DM_SBADDRESS2: + result = sbaddress[2]; + break; + case DM_SBADDRESS3: + result = sbaddress[3]; + break; + case DM_SBDATA0: + result = sbdata[0]; + if (sbcs.error == 0) { + if (sbcs.readondata) { + sb_read(); + } + if (sbcs.error == 0) { + sb_autoincrement(); + } + } + break; + case DM_SBDATA1: + result = sbdata[1]; + break; + case DM_SBDATA2: + result = sbdata[2]; + break; + case DM_SBDATA3: + result = sbdata[3]; + break; + case DM_AUTHDATA: + result = challenge; + break; + case DM_DMCS2: + result = set_field(result, DM_DMCS2_GROUP, + hart_state[dmcontrol.hartsel].haltgroup); + break; + default: + result = 0; + D(fprintf(stderr, "Unexpected. Returning Error.")); + return false; + } + } + D(fprintf(stderr, "0x%x\n", result)); + *value = result; + return true; +} + +void debug_module_t::run_test_idle() +{ + if (rti_remaining > 0) { + rti_remaining--; + } + if (rti_remaining == 0 && abstractcs.busy && abstract_command_completed) { + abstractcs.busy = false; + } +} + +static bool is_fpu_reg(unsigned regno) +{ + return (regno >= 0x1020 && regno <= 0x103f) || regno == CSR_FFLAGS || + regno == CSR_FRM || regno == CSR_FCSR; +} + +bool debug_module_t::perform_abstract_command() +{ + if (abstractcs.cmderr != CMDERR_NONE) + return true; + if (abstractcs.busy) { + abstractcs.cmderr = CMDERR_BUSY; + return true; + } + + if ((command >> 24) == 0) { + // register access + unsigned size = get_field(command, AC_ACCESS_REGISTER_AARSIZE); + bool write = get_field(command, AC_ACCESS_REGISTER_WRITE); + unsigned regno = get_field(command, AC_ACCESS_REGISTER_REGNO); + + if (!hart_state[dmcontrol.hartsel].halted) { + abstractcs.cmderr = CMDERR_HALTRESUME; + return true; + } + + unsigned i = 0; + if (get_field(command, AC_ACCESS_REGISTER_TRANSFER)) { + + if (is_fpu_reg(regno)) { + // Save S0 + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0)); + // Save mstatus + write32(debug_abstract, i++, csrr(S0, CSR_MSTATUS)); + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH1)); + // Set mstatus.fs + assert((MSTATUS_FS & 0xfff) == 0); + write32(debug_abstract, i++, lui(S0, MSTATUS_FS >> 12)); + write32(debug_abstract, i++, csrrs(ZERO, S0, CSR_MSTATUS)); + } + + if (regno < 0x1000 && config.support_abstract_csr_access) { + if (!is_fpu_reg(regno)) { + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0)); + } + + if (write) { + switch (size) { + case 2: + write32(debug_abstract, i++, lw(S0, ZERO, debug_data_start)); + break; + case 3: + write32(debug_abstract, i++, ld(S0, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + write32(debug_abstract, i++, csrw(S0, regno)); + + } else { + write32(debug_abstract, i++, csrr(S0, regno)); + switch (size) { + case 2: + write32(debug_abstract, i++, sw(S0, ZERO, debug_data_start)); + break; + case 3: + write32(debug_abstract, i++, sd(S0, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + } + if (!is_fpu_reg(regno)) { + write32(debug_abstract, i++, csrr(S0, CSR_DSCRATCH0)); + } + + } else if (regno >= 0x1000 && regno < 0x1020) { + unsigned regnum = regno - 0x1000; + + switch (size) { + case 2: + if (write) + 
write32(debug_abstract, i++, lw(regnum, ZERO, debug_data_start)); + else + write32(debug_abstract, i++, sw(regnum, ZERO, debug_data_start)); + break; + case 3: + if (write) + write32(debug_abstract, i++, ld(regnum, ZERO, debug_data_start)); + else + write32(debug_abstract, i++, sd(regnum, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + + if (regno == 0x1000 + S0 && write) { + /* + * The exception handler starts out be restoring dscratch to s0, + * which was saved before executing the abstract memory region. Since + * we just wrote s0, also make sure to write that same value to + * dscratch in case an exception occurs in a program buffer that + * might be executed later. + */ + write32(debug_abstract, i++, csrw(S0, CSR_DSCRATCH0)); + } + + } else if (regno >= 0x1020 && regno < 0x1040) { + unsigned fprnum = regno - 0x1020; + + if (write) { + switch (size) { + case 2: + write32(debug_abstract, i++, flw(fprnum, ZERO, debug_data_start)); + break; + case 3: + write32(debug_abstract, i++, fld(fprnum, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + + } else { + switch (size) { + case 2: + write32(debug_abstract, i++, fsw(fprnum, ZERO, debug_data_start)); + break; + case 3: + write32(debug_abstract, i++, fsd(fprnum, ZERO, debug_data_start)); + break; + default: + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + } + + } else if (regno >= 0xc000 && (regno & 1) == 1) { + // Support odd-numbered custom registers, to allow for debugger testing. + unsigned custom_number = regno - 0xc000; + abstractcs.cmderr = CMDERR_NONE; + if (write) { + // Writing V to custom register N will cause future reads of N to + // return V, reads of N-1 will return V-1, etc. 
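+ // (Worked example: writing 100 to register 0xc007 (custom 7) sets
+ // custom_base to 100 - 7 == 93, so a later read of 0xc003 returns 96.)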
+ custom_base = read32(dmdata, 0) - custom_number; + } else { + write32(dmdata, 0, custom_number + custom_base); + write32(dmdata, 1, 0); + } + return true; + + } else { + abstractcs.cmderr = CMDERR_NOTSUP; + return true; + } + + if (is_fpu_reg(regno)) { + // restore mstatus + write32(debug_abstract, i++, csrr(S0, CSR_DSCRATCH1)); + write32(debug_abstract, i++, csrw(S0, CSR_MSTATUS)); + // restore s0 + write32(debug_abstract, i++, csrr(S0, CSR_DSCRATCH0)); + } + } + + if (get_field(command, AC_ACCESS_REGISTER_POSTEXEC)) { + write32(debug_abstract, i, + jal(ZERO, debug_progbuf_start - debug_abstract_start - 4 * i)); + i++; + } else { + write32(debug_abstract, i++, ebreak()); + } + + debug_rom_flags[dmcontrol.hartsel] |= 1 << DEBUG_ROM_FLAG_GO; + rti_remaining = config.abstract_rti; + abstract_command_completed = false; + + abstractcs.busy = true; + } else { + abstractcs.cmderr = CMDERR_NOTSUP; + } + return true; +} + +bool debug_module_t::dmi_write(unsigned address, uint32_t value) +{ + D(fprintf(stderr, "dmi_write(0x%x, 0x%x)\n", address, value)); + + if (!dmstatus.authenticated && address != DM_AUTHDATA && + address != DM_DMCONTROL) + return false; + + if (address >= DM_DATA0 && address < DM_DATA0 + abstractcs.datacount) { + unsigned i = address - DM_DATA0; + if (!abstractcs.busy) + write32(dmdata, address - DM_DATA0, value); + + if (abstractcs.busy && abstractcs.cmderr == CMDERR_NONE) { + abstractcs.cmderr = CMDERR_BUSY; + } + + if (!abstractcs.busy && ((abstractauto.autoexecdata >> i) & 1)) { + perform_abstract_command(); + } + return true; + + } else if (address >= DM_PROGBUF0 && address < DM_PROGBUF0 + config.progbufsize) { + unsigned i = address - DM_PROGBUF0; + + if (!abstractcs.busy) + write32(program_buffer, i, value); + + if (!abstractcs.busy && ((abstractauto.autoexecprogbuf >> i) & 1)) { + perform_abstract_command(); + } + return true; + + } else { + switch (address) { + case DM_DMCONTROL: + { + if (!dmcontrol.dmactive && get_field(value, DM_DMCONTROL_DMACTIVE)) + reset(); + dmcontrol.dmactive = get_field(value, DM_DMCONTROL_DMACTIVE); + if (!dmstatus.authenticated || !dmcontrol.dmactive) + return true; + + dmcontrol.haltreq = get_field(value, DM_DMCONTROL_HALTREQ); + dmcontrol.resumereq = get_field(value, DM_DMCONTROL_RESUMEREQ); + dmcontrol.hartreset = get_field(value, DM_DMCONTROL_HARTRESET); + dmcontrol.ndmreset = get_field(value, DM_DMCONTROL_NDMRESET); + if (config.support_hasel) + dmcontrol.hasel = get_field(value, DM_DMCONTROL_HASEL); + else + dmcontrol.hasel = 0; + dmcontrol.hartsel = get_field(value, DM_DMCONTROL_HARTSELHI) << + DM_DMCONTROL_HARTSELLO_LENGTH; + dmcontrol.hartsel |= get_field(value, DM_DMCONTROL_HARTSELLO); + dmcontrol.hartsel &= (1L<halt_request = dmcontrol.haltreq ? 
proc->HR_REGULAR : proc->HR_NONE; + if (dmcontrol.haltreq) { + D(fprintf(stderr, "halt hart %d\n", i)); + } + if (dmcontrol.resumereq) { + D(fprintf(stderr, "resume hart %d\n", i)); + debug_rom_flags[i] |= (1 << DEBUG_ROM_FLAG_RESUME); + hart_state[i].resumeack = false; + } + if (dmcontrol.hartreset) { + proc->reset(); + } + } + } + } + + if (dmcontrol.ndmreset) { + for (size_t i = 0; i < sim->nprocs(); i++) { + processor_t *proc = sim->get_core(i); + proc->reset(); + } + } + } + return true; + + case DM_COMMAND: + command = value; + return perform_abstract_command(); + + case DM_HAWINDOWSEL: + hawindowsel = value & ((1U<> i) & 1; + } + } + } + return true; + + case DM_ABSTRACTCS: + abstractcs.cmderr = (cmderr_t) (((uint32_t) (abstractcs.cmderr)) & (~(uint32_t)(get_field(value, DM_ABSTRACTCS_CMDERR)))); + return true; + + case DM_ABSTRACTAUTO: + abstractauto.autoexecprogbuf = get_field(value, + DM_ABSTRACTAUTO_AUTOEXECPROGBUF); + abstractauto.autoexecdata = get_field(value, + DM_ABSTRACTAUTO_AUTOEXECDATA); + return true; + case DM_SBCS: + sbcs.readonaddr = get_field(value, DM_SBCS_SBREADONADDR); + sbcs.sbaccess = get_field(value, DM_SBCS_SBACCESS); + sbcs.autoincrement = get_field(value, DM_SBCS_SBAUTOINCREMENT); + sbcs.readondata = get_field(value, DM_SBCS_SBREADONDATA); + sbcs.error &= ~get_field(value, DM_SBCS_SBERROR); + return true; + case DM_SBADDRESS0: + sbaddress[0] = value; + if (sbcs.error == 0 && sbcs.readonaddr) { + sb_read(); + sb_autoincrement(); + } + return true; + case DM_SBADDRESS1: + sbaddress[1] = value; + return true; + case DM_SBADDRESS2: + sbaddress[2] = value; + return true; + case DM_SBADDRESS3: + sbaddress[3] = value; + return true; + case DM_SBDATA0: + sbdata[0] = value; + if (sbcs.error == 0) { + sb_write(); + if (sbcs.error == 0) { + sb_autoincrement(); + } + } + return true; + case DM_SBDATA1: + sbdata[1] = value; + return true; + case DM_SBDATA2: + sbdata[2] = value; + return true; + case DM_SBDATA3: + sbdata[3] = value; + return true; + case DM_AUTHDATA: + D(fprintf(stderr, "debug authentication: got 0x%x; 0x%x unlocks\n", value, + challenge + secret)); + if (config.require_authentication) { + if (value == challenge + secret) { + dmstatus.authenticated = true; + } else { + dmstatus.authenticated = false; + challenge = random(); + } + } + return true; + case DM_DMCS2: + if (config.support_haltgroups && get_field(value, DM_DMCS2_HGWRITE)) { + hart_state[dmcontrol.hartsel].haltgroup = get_field(value, + DM_DMCS2_GROUP); + } + return true; + } + } + return false; +} + +void debug_module_t::proc_reset(unsigned id) +{ + hart_state[id].havereset = true; + hart_state[id].halted = false; + hart_state[id].haltgroup = 0; +} diff --git a/vendor/riscv-isa-sim/riscv/debug_module.h b/vendor/riscv-isa-sim/riscv/debug_module.h new file mode 100644 index 00000000..d79ce7d1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/debug_module.h @@ -0,0 +1,193 @@ +// See LICENSE for license details. +#ifndef _RISCV_DEBUG_MODULE_H +#define _RISCV_DEBUG_MODULE_H + +#include + +#include "abstract_device.h" +#include "mmu.h" + +class sim_t; +class bus_t; + +typedef struct { + // Size of program_buffer in 32-bit words, as exposed to the rest of the + // world. 
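+ // (With progbufsize == 2 and support_impebreak == true, debug_module_t
+ // allocates 2*4 + 4 == 12 program buffer bytes: two user words plus the
+ // implicit ebreak.)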
+ unsigned progbufsize; + unsigned max_sba_data_width; + bool require_authentication; + unsigned abstract_rti; + bool support_hasel; + bool support_abstract_csr_access; + bool support_haltgroups; + bool support_impebreak; +} debug_module_config_t; + +typedef struct { + bool haltreq; + bool resumereq; + bool hasel; + unsigned hartsel; + bool hartreset; + bool dmactive; + bool ndmreset; +} dmcontrol_t; + +typedef struct { + bool impebreak; + bool allhavereset; + bool anyhavereset; + bool allnonexistant; + bool anynonexistant; + bool allunavail; + bool anyunavail; + bool allrunning; + bool anyrunning; + bool allhalted; + bool anyhalted; + bool allresumeack; + bool anyresumeack; + bool authenticated; + bool authbusy; + bool cfgstrvalid; + unsigned version; +} dmstatus_t; + +typedef enum cmderr { + CMDERR_NONE = 0, + CMDERR_BUSY = 1, + CMDERR_NOTSUP = 2, + CMDERR_EXCEPTION = 3, + CMDERR_HALTRESUME = 4, + CMDERR_OTHER = 7 +} cmderr_t; + +typedef struct { + bool busy; + unsigned datacount; + unsigned progbufsize; + cmderr_t cmderr; +} abstractcs_t; + +typedef struct { + unsigned autoexecprogbuf; + unsigned autoexecdata; +} abstractauto_t; + +typedef struct { + unsigned version; + bool readonaddr; + unsigned sbaccess; + bool autoincrement; + bool readondata; + unsigned error; + unsigned asize; + bool access128; + bool access64; + bool access32; + bool access16; + bool access8; +} sbcs_t; + +typedef struct { + bool halted; + bool resumeack; + bool havereset; + uint8_t haltgroup; +} hart_debug_state_t; + +class debug_module_t : public abstract_device_t +{ + public: + /* + * If require_authentication is true, then a debugger must authenticate as + * follows: + * 1. Read a 32-bit value from authdata: + * 2. Write the value that was read back, plus one, to authdata. + * + * abstract_rti is extra run-test/idle cycles that each abstract command + * takes to execute. Useful for testing OpenOCD. + */ + debug_module_t(sim_t *sim, const debug_module_config_t &config); + ~debug_module_t(); + + void add_device(bus_t *bus); + + bool load(reg_t addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + + // Debug Module Interface that the debugger (in our case through JTAG DTM) + // uses to access the DM. + // Return true for success, false for failure. + bool dmi_read(unsigned address, uint32_t *value); + bool dmi_write(unsigned address, uint32_t value); + + // Called for every cycle the JTAG TAP spends in Run-Test/Idle. + void run_test_idle(); + + // Called when one of the attached harts was reset. + void proc_reset(unsigned id); + + private: + static const unsigned datasize = 2; + unsigned nprocs; + debug_module_config_t config; + // Actual size of the program buffer, which is 1 word bigger than we let on + // to implement the implicit ebreak at the end. + unsigned program_buffer_bytes; + static const unsigned debug_data_start = 0x380; + unsigned debug_progbuf_start; + + static const unsigned debug_abstract_size = 12; + unsigned debug_abstract_start; + // R/W this through custom registers, to allow debuggers to test that + // functionality. + unsigned custom_base; + + // We only support 1024 harts currently. More requires at least resizing + // the arrays below, and their corresponding special memory regions. 
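+ // (1024 == 2^10, hence the hartsellen default of 10 below and the
+ // 1024-entry debug_rom_flags array.)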
+ unsigned hartsellen = 10; + + sim_t *sim; + + uint8_t debug_rom_whereto[4]; + uint8_t debug_abstract[debug_abstract_size * 4]; + uint8_t *program_buffer; + uint8_t dmdata[datasize * 4]; + + std::vector hart_state; + uint8_t debug_rom_flags[1024]; + + void write32(uint8_t *rom, unsigned int index, uint32_t value); + uint32_t read32(uint8_t *rom, unsigned int index); + + void sb_autoincrement(); + void sb_read(); + void sb_write(); + unsigned sb_access_bits(); + + dmcontrol_t dmcontrol; + dmstatus_t dmstatus; + abstractcs_t abstractcs; + abstractauto_t abstractauto; + uint32_t command; + uint16_t hawindowsel; + std::vector hart_array_mask; + + sbcs_t sbcs; + uint32_t sbaddress[4]; + uint32_t sbdata[4]; + + uint32_t challenge; + const uint32_t secret = 1; + + processor_t *processor(unsigned hartid) const; + bool hart_selected(unsigned hartid) const; + void reset(); + bool perform_abstract_command(); + + bool abstract_command_completed; + unsigned rti_remaining; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/debug_rom_defines.h b/vendor/riscv-isa-sim/riscv/debug_rom_defines.h new file mode 100644 index 00000000..616cf590 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/debug_rom_defines.h @@ -0,0 +1,23 @@ +// See LICENSE file for license details. + +#ifndef DEBUG_ROM_DEFINES_H +#define DEBUG_ROM_DEFINES_H + +// These are implementation-specific addresses in the Debug Module +#define DEBUG_ROM_HALTED 0x100 +#define DEBUG_ROM_GOING 0x104 +#define DEBUG_ROM_RESUMING 0x108 +#define DEBUG_ROM_EXCEPTION 0x10C + +// Region of memory where each hart has 1 +// byte to read. +#define DEBUG_ROM_FLAGS 0x400 +#define DEBUG_ROM_FLAG_GO 0 +#define DEBUG_ROM_FLAG_RESUME 1 + +// These needs to match the link.ld +#define DEBUG_ROM_WHERETO 0x300 +#define DEBUG_ROM_ENTRY 0x800 +#define DEBUG_ROM_TVEC 0x808 + +#endif diff --git a/vendor/riscv-isa-sim/riscv/decode.h b/vendor/riscv-isa-sim/riscv/decode.h new file mode 100644 index 00000000..611c9107 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/decode.h @@ -0,0 +1,2996 @@ +// See LICENSE for license details. + +#ifndef _RISCV_DECODE_H +#define _RISCV_DECODE_H + +#if (-1 != ~0) || ((-1 >> 1) != -1) +# error spike requires a two''s-complement c++ implementation +#endif + +#include +#include +#include +#include +#include "encoding.h" +#include "config.h" +#include "common.h" +#include "softfloat_types.h" +#include "specialize.h" +#include + +typedef int64_t sreg_t; +typedef uint64_t reg_t; + +#ifdef __SIZEOF_INT128__ +typedef __int128 int128_t; +typedef unsigned __int128 uint128_t; +#endif + +const int NXPR = 32; +const int NFPR = 32; +const int NVPR = 32; +const int NCSR = 4096; + +#define X_RA 1 +#define X_SP 2 + +#define VCSR_VXRM_SHIFT 1 +#define VCSR_VXRM (0x3 << VCSR_VXRM_SHIFT) + +#define VCSR_VXSAT_SHIFT 0 +#define VCSR_VXSAT (0x1 << VCSR_VXSAT_SHIFT) + +#define FP_RD_NE 0 +#define FP_RD_0 1 +#define FP_RD_DN 2 +#define FP_RD_UP 3 +#define FP_RD_NMM 4 + +#define FSR_RD_SHIFT 5 +#define FSR_RD (0x7 << FSR_RD_SHIFT) + +#define FPEXC_NX 0x01 +#define FPEXC_UF 0x02 +#define FPEXC_OF 0x04 +#define FPEXC_DZ 0x08 +#define FPEXC_NV 0x10 + +#define FSR_AEXC_SHIFT 0 +#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT) +#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT) +#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT) +#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT) +#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT) +#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA) + +#define insn_length(x) \ + (((x) & 0x03) < 0x03 ? 2 : \ + ((x) & 0x1f) < 0x1f ? 
4 : \ + ((x) & 0x3f) < 0x3f ? 6 : \ + ((x) & 0x7f) == 0x7f ? 4 : \ + 8) +#define MAX_INSN_LENGTH 8 +#define PC_ALIGN 2 + +typedef uint64_t insn_bits_t; +class insn_t +{ +public: + insn_t() = default; + insn_t(insn_bits_t bits) : b(bits) {} + insn_bits_t bits() { return b & ~((UINT64_MAX) << (length() * 8)); } + int length() { return insn_length(b); } + int64_t i_imm() { return int64_t(b) >> 20; } + int64_t shamt() { return x(20, 6); } + int64_t s_imm() { return x(7, 5) + (xs(25, 7) << 5); } + int64_t sb_imm() { return (x(8, 4) << 1) + (x(25, 6) << 5) + (x(7, 1) << 11) + (imm_sign() << 12); } + int64_t u_imm() { return int64_t(b) >> 12 << 12; } + int64_t uj_imm() { return (x(21, 10) << 1) + (x(20, 1) << 11) + (x(12, 8) << 12) + (imm_sign() << 20); } + uint64_t rd() { return x(7, 5); } + uint64_t rs1() { return x(15, 5); } + uint64_t rs2() { return x(20, 5); } + uint64_t rs3() { return x(27, 5); } + uint64_t rm() { return x(12, 3); } + uint64_t csr() { return x(20, 12); } + uint64_t iorw() { return x(20, 8); } + uint64_t bs() { return x(30, 2); } // Crypto ISE - SM4/AES32 byte select. + uint64_t rcon() { return x(20, 4); } // Crypto ISE - AES64 round const. + + int64_t rvc_imm() { return x(2, 5) + (xs(12, 1) << 5); } + int64_t rvc_zimm() { return x(2, 5) + (x(12, 1) << 5); } + int64_t rvc_addi4spn_imm() { return (x(6, 1) << 2) + (x(5, 1) << 3) + (x(11, 2) << 4) + (x(7, 4) << 6); } + int64_t rvc_addi16sp_imm() { return (x(6, 1) << 4) + (x(2, 1) << 5) + (x(5, 1) << 6) + (x(3, 2) << 7) + (xs(12, 1) << 9); } + int64_t rvc_lwsp_imm() { return (x(4, 3) << 2) + (x(12, 1) << 5) + (x(2, 2) << 6); } + int64_t rvc_ldsp_imm() { return (x(5, 2) << 3) + (x(12, 1) << 5) + (x(2, 3) << 6); } + int64_t rvc_swsp_imm() { return (x(9, 4) << 2) + (x(7, 2) << 6); } + int64_t rvc_sdsp_imm() { return (x(10, 3) << 3) + (x(7, 3) << 6); } + int64_t rvc_lw_imm() { return (x(6, 1) << 2) + (x(10, 3) << 3) + (x(5, 1) << 6); } + int64_t rvc_ld_imm() { return (x(10, 3) << 3) + (x(5, 2) << 6); } + int64_t rvc_j_imm() { return (x(3, 3) << 1) + (x(11, 1) << 4) + (x(2, 1) << 5) + (x(7, 1) << 6) + (x(6, 1) << 7) + (x(9, 2) << 8) + (x(8, 1) << 10) + (xs(12, 1) << 11); } + int64_t rvc_b_imm() { return (x(3, 2) << 1) + (x(10, 2) << 3) + (x(2, 1) << 5) + (x(5, 2) << 6) + (xs(12, 1) << 8); } + int64_t rvc_simm3() { return x(10, 3); } + uint64_t rvc_rd() { return rd(); } + uint64_t rvc_rs1() { return rd(); } + uint64_t rvc_rs2() { return x(2, 5); } + uint64_t rvc_rs1s() { return 8 + x(7, 3); } + uint64_t rvc_rs2s() { return 8 + x(2, 3); } + + uint64_t v_vm() { return x(25, 1); } + uint64_t v_wd() { return x(26, 1); } + uint64_t v_nf() { return x(29, 3); } + uint64_t v_simm5() { return xs(15, 5); } + uint64_t v_zimm5() { return x(15, 5); } + uint64_t v_zimm10() { return x(20, 10); } + uint64_t v_zimm11() { return x(20, 11); } + uint64_t v_lmul() { return x(20, 2); } + uint64_t v_frac_lmul() { return x(22, 1); } + uint64_t v_sew() { return 1 << (x(23, 3) + 3); } + uint64_t v_width() { return x(12, 3); } + uint64_t v_mop() { return x(26, 2); } + uint64_t v_lumop() { return x(20, 5); } + uint64_t v_sumop() { return x(20, 5); } + uint64_t v_vta() { return x(26, 1); } + uint64_t v_vma() { return x(27, 1); } + uint64_t v_mew() { return x(28, 1); } + + uint64_t p_imm2() { return x(20, 2); } + uint64_t p_imm3() { return x(20, 3); } + uint64_t p_imm4() { return x(20, 4); } + uint64_t p_imm5() { return x(20, 5); } + uint64_t p_imm6() { return x(20, 6); } + +private: + insn_bits_t b; + uint64_t x(int lo, int len) { return (b >> lo) & 
((insn_bits_t(1) << len) - 1); } + uint64_t xs(int lo, int len) { return int64_t(b) << (64 - lo - len) >> (64 - len); } + uint64_t imm_sign() { return xs(63, 1); } +}; + +template +class regfile_t +{ +public: + void write(size_t i, T value) + { + if (!zero_reg || i != 0) + data[i] = value; + } + const T& operator [] (size_t i) const + { + return data[i]; + } + regfile_t() + { + reset(); + } + void reset() + { + memset(data, 0, sizeof(data)); + } +private: + T data[N]; +}; + +// helpful macros, etc +#define MMU (*p->get_mmu()) +#define STATE (*p->get_state()) +#define FLEN (p->get_flen()) +#define CHECK_REG(reg) ((void) 0) +#define READ_REG(reg) ({ CHECK_REG(reg); STATE.XPR[reg]; }) +#define READ_FREG(reg) STATE.FPR[reg] +#define RD READ_REG(insn.rd()) +#define RS1 READ_REG(insn.rs1()) +#define RS2 READ_REG(insn.rs2()) +#define RS3 READ_REG(insn.rs3()) +#define WRITE_RD(value) WRITE_REG(insn.rd(), value) + +#ifndef RISCV_ENABLE_COMMITLOG +# define WRITE_REG(reg, value) ({ CHECK_REG(reg); STATE.XPR.write(reg, value); }) +# define WRITE_FREG(reg, value) DO_WRITE_FREG(reg, freg(value)) +# define WRITE_VSTATUS {} +#else + /* 0 : int + * 1 : floating + * 2 : vector reg + * 3 : vector hint + * 4 : csr + */ +# define WRITE_REG(reg, value) ({ \ + reg_t wdata = (value); /* value may have side effects */ \ + STATE.log_reg_write[(reg) << 4] = {wdata, 0}; \ + CHECK_REG(reg); \ + STATE.XPR.write(reg, wdata); \ + }) +# define WRITE_FREG(reg, value) ({ \ + freg_t wdata = freg(value); /* value may have side effects */ \ + STATE.log_reg_write[((reg) << 4) | 1] = wdata; \ + DO_WRITE_FREG(reg, wdata); \ + }) +# define WRITE_VSTATUS STATE.log_reg_write[3] = {0, 0}; +#endif + +// RVC macros +#define WRITE_RVC_RS1S(value) WRITE_REG(insn.rvc_rs1s(), value) +#define WRITE_RVC_RS2S(value) WRITE_REG(insn.rvc_rs2s(), value) +#define WRITE_RVC_FRS2S(value) WRITE_FREG(insn.rvc_rs2s(), value) +#define RVC_RS1 READ_REG(insn.rvc_rs1()) +#define RVC_RS2 READ_REG(insn.rvc_rs2()) +#define RVC_RS1S READ_REG(insn.rvc_rs1s()) +#define RVC_RS2S READ_REG(insn.rvc_rs2s()) +#define RVC_FRS2 READ_FREG(insn.rvc_rs2()) +#define RVC_FRS2S READ_FREG(insn.rvc_rs2s()) +#define RVC_SP READ_REG(X_SP) + +// FPU macros +#define FRS1 READ_FREG(insn.rs1()) +#define FRS2 READ_FREG(insn.rs2()) +#define FRS3 READ_FREG(insn.rs3()) +#define dirty_fp_state STATE.sstatus->dirty(SSTATUS_FS) +#define dirty_ext_state STATE.sstatus->dirty(SSTATUS_XS) +#define dirty_vs_state STATE.sstatus->dirty(SSTATUS_VS) +#define DO_WRITE_FREG(reg, value) (STATE.FPR.write(reg, value), dirty_fp_state) +#define WRITE_FRD(value) WRITE_FREG(insn.rd(), value) + +#define SHAMT (insn.i_imm() & 0x3F) +#define BRANCH_TARGET (pc + insn.sb_imm()) +#define JUMP_TARGET (pc + insn.uj_imm()) +#define RM ({ int rm = insn.rm(); \ + if (rm == 7) rm = STATE.frm->read(); \ + if (rm > 4) throw trap_illegal_instruction(insn.bits()); \ + rm; }) + +#define get_field(reg, mask) (((reg) & (decltype(reg))(mask)) / ((mask) & ~((mask) << 1))) +#define set_field(reg, mask, val) (((reg) & ~(decltype(reg))(mask)) | (((decltype(reg))(val) * ((mask) & ~((mask) << 1))) & (decltype(reg))(mask))) + +#define require_privilege(p) require(STATE.prv >= (p)) +#define require_novirt() if (unlikely(STATE.v)) throw trap_virtual_instruction(insn.bits()) +#define require_rv64 require(xlen == 64) +#define require_rv32 require(xlen == 32) +#define require_extension(s) require(p->extension_enabled(s)) +#define require_either_extension(A,B) require(p->extension_enabled(A) || p->extension_enabled(B)); +#define 
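Illustration (not vendored code): get_field()/set_field() above avoid passing an explicit shift amount by dividing or multiplying by the lowest set bit of the mask, which `mask & ~(mask << 1)` isolates. A small self-contained demonstration, using the mstatus.MPP field (bits 12:11) purely as an example.

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t MSTATUS_MPP = 0x1800;                 // bits 12:11
  uint64_t mstatus = 0;

  uint64_t lsb = MSTATUS_MPP & ~(MSTATUS_MPP << 1);    // 0x800, i.e. 1 << 11
  // set_field(mstatus, MSTATUS_MPP, 3)
  mstatus = (mstatus & ~MSTATUS_MPP) | ((3 * lsb) & MSTATUS_MPP);
  // get_field(mstatus, MSTATUS_MPP) == 3
  assert(((mstatus & MSTATUS_MPP) / lsb) == 3);
  return 0;
}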
require_impl(s) require(p->supports_impl(s)) +#define require_fp require(STATE.sstatus->enabled(SSTATUS_FS)) +#define require_accelerator require(STATE.sstatus->enabled(SSTATUS_XS)) +#define require_vector_vs require(STATE.sstatus->enabled(SSTATUS_VS)) +#define require_vector(alu) \ + do { \ + require_vector_vs; \ + require_extension('V'); \ + require(!P.VU.vill); \ + if (alu && !P.VU.vstart_alu) \ + require(P.VU.vstart->read() == 0); \ + WRITE_VSTATUS; \ + dirty_vs_state; \ + } while (0); +#define require_vector_novtype(is_log, alu) \ + do { \ + require_vector_vs; \ + require_extension('V'); \ + if (alu && !P.VU.vstart_alu) \ + require(P.VU.vstart->read() == 0); \ + if (is_log) \ + WRITE_VSTATUS; \ + dirty_vs_state; \ + } while (0); +#define require_align(val, pos) require(is_aligned(val, pos)) +#define require_noover(astart, asize, bstart, bsize) \ + require(!is_overlapped(astart, asize, bstart, bsize)) +#define require_noover_widen(astart, asize, bstart, bsize) \ + require(!is_overlapped_widen(astart, asize, bstart, bsize)) +#define require_vm do { if (insn.v_vm() == 0) require(insn.rd() != 0); } while (0); +#define require_envcfg(field) \ + do { \ + if (((STATE.prv != PRV_M) && (m##field == 0)) || \ + ((STATE.prv == PRV_U && !STATE.v) && (s##field == 0))) \ + throw trap_illegal_instruction(insn.bits()); \ + else if (STATE.v && ((h##field == 0) || \ + ((STATE.prv == PRV_U) && (s##field == 0)))) \ + throw trap_virtual_instruction(insn.bits()); \ + } while (0); + +#define set_fp_exceptions ({ if (softfloat_exceptionFlags) { \ + STATE.fflags->write(STATE.fflags->read() | softfloat_exceptionFlags); \ + } \ + softfloat_exceptionFlags = 0; }) + +#define sext32(x) ((sreg_t)(int32_t)(x)) +#define zext32(x) ((reg_t)(uint32_t)(x)) +#define sext_xlen(x) (((sreg_t)(x) << (64 - xlen)) >> (64 - xlen)) +#define zext(x, pos) (((reg_t)(x) << (64 - (pos))) >> (64 - (pos))) +#define zext_xlen(x) zext(x, xlen) + +#define set_pc(x) \ + do { p->check_pc_alignment(x); \ + npc = sext_xlen(x); \ + } while (0) + +#define set_pc_and_serialize(x) \ + do { reg_t __npc = (x) & p->pc_alignment_mask(); \ + npc = PC_SERIALIZE_AFTER; \ + STATE.pc = __npc; \ + } while (0) + +class wait_for_interrupt_t {}; + +#define wfi() \ + do { set_pc_and_serialize(npc); \ + npc = PC_SERIALIZE_WFI; \ + throw wait_for_interrupt_t(); \ + } while (0) + +#define serialize() set_pc_and_serialize(npc) + +/* Sentinel PC values to serialize simulator pipeline */ +#define PC_SERIALIZE_BEFORE 3 +#define PC_SERIALIZE_AFTER 5 +#define PC_SERIALIZE_WFI 7 +#define invalid_pc(pc) ((pc) & 1) + +/* Convenience wrappers to simplify softfloat code sequences */ +#define isBoxedF16(r) (isBoxedF32(r) && ((uint64_t)((r.v[0] >> 16) + 1) == ((uint64_t)1 << 48))) +#define unboxF16(r) (isBoxedF16(r) ? (uint16_t)r.v[0] : defaultNaNF16UI) +#define isBoxedF32(r) (isBoxedF64(r) && ((uint32_t)((r.v[0] >> 32) + 1) == 0)) +#define unboxF32(r) (isBoxedF32(r) ? (uint32_t)r.v[0] : defaultNaNF32UI) +#define isBoxedF64(r) ((r.v[1] + 1) == 0) +#define unboxF64(r) (isBoxedF64(r) ? 
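Illustration (not vendored code): sext_xlen()/zext() narrow a 64-bit value to xlen bits by shifting it up to the top of the word and back down, arithmetically for sign extension and logically for zero extension. A minimal sketch of the same trick, with the signed case written as unsigned-shift-then-cast.

#include <cstdint>
#include <cassert>

int main() {
  const int xlen = 32;
  uint64_t raw = 0x0000000080000000ull;                               // bit 31 set

  int64_t  sext = (int64_t)(raw << (64 - xlen)) >> (64 - xlen);       // like sext_xlen
  uint64_t zext = (raw << (64 - xlen)) >> (64 - xlen);                // like zext_xlen

  assert(sext == (int64_t)0xffffffff80000000ull);
  assert(zext == 0x0000000080000000ull);
  return 0;
}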
r.v[0] : defaultNaNF64UI) +typedef float128_t freg_t; +inline float16_t f16(uint16_t v) { return { v }; } +inline float32_t f32(uint32_t v) { return { v }; } +inline float64_t f64(uint64_t v) { return { v }; } +inline float16_t f16(freg_t r) { return f16(unboxF16(r)); } +inline float32_t f32(freg_t r) { return f32(unboxF32(r)); } +inline float64_t f64(freg_t r) { return f64(unboxF64(r)); } +inline float128_t f128(freg_t r) { return r; } +inline freg_t freg(float16_t f) { return { ((uint64_t)-1 << 16) | f.v, (uint64_t)-1 }; } +inline freg_t freg(float32_t f) { return { ((uint64_t)-1 << 32) | f.v, (uint64_t)-1 }; } +inline freg_t freg(float64_t f) { return { f.v, (uint64_t)-1 }; } +inline freg_t freg(float128_t f) { return f; } +#define F16_SIGN ((uint16_t)1 << 15) +#define F32_SIGN ((uint32_t)1 << 31) +#define F64_SIGN ((uint64_t)1 << 63) +#define fsgnj16(a, b, n, x) \ + f16((f16(a).v & ~F16_SIGN) | ((((x) ? f16(a).v : (n) ? F16_SIGN : 0) ^ f16(b).v) & F16_SIGN)) +#define fsgnj32(a, b, n, x) \ + f32((f32(a).v & ~F32_SIGN) | ((((x) ? f32(a).v : (n) ? F32_SIGN : 0) ^ f32(b).v) & F32_SIGN)) +#define fsgnj64(a, b, n, x) \ + f64((f64(a).v & ~F64_SIGN) | ((((x) ? f64(a).v : (n) ? F64_SIGN : 0) ^ f64(b).v) & F64_SIGN)) + +#define isNaNF128(x) isNaNF128UI(x.v[1], x.v[0]) +inline float128_t defaultNaNF128() +{ + float128_t nan; + nan.v[1] = defaultNaNF128UI64; + nan.v[0] = defaultNaNF128UI0; + return nan; +} +inline freg_t fsgnj128(freg_t a, freg_t b, bool n, bool x) +{ + a.v[1] = (a.v[1] & ~F64_SIGN) | (((x ? a.v[1] : n ? F64_SIGN : 0) ^ b.v[1]) & F64_SIGN); + return a; +} +inline freg_t f128_negate(freg_t a) +{ + a.v[1] ^= F64_SIGN; + return a; +} + +#define validate_csr(which, write) ({ \ + if (!STATE.serialized) return PC_SERIALIZE_BEFORE; \ + STATE.serialized = false; \ + /* permissions check occurs in get_csr */ \ + (which); }) + +/* For debug only. 
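Illustration (not vendored code): the freg()/isBoxed*/unbox* helpers above implement RISC-V NaN boxing, where a narrower float held in a wider FP register is valid only if every bit above it is 1, and anything else must read back as the canonical NaN. A self-contained sketch of the 32-bit case, using a plain two-word struct as a stand-in for softfloat's float128_t.

#include <cstdint>
#include <cassert>

struct freg { uint64_t v[2]; };                       // stand-in for float128_t

static freg box_f32(uint32_t bits) {                  // like freg(float32_t)
  return { { (~UINT64_C(0) << 32) | bits, ~UINT64_C(0) } };
}

static uint32_t unbox_f32(freg r) {                   // like unboxF32()
  bool boxed64 = (r.v[1] + 1) == 0;                                   // upper word all ones
  bool boxed32 = boxed64 && (uint32_t)((r.v[0] >> 32) + 1) == 0;      // bits 63:32 all ones
  return boxed32 ? (uint32_t)r.v[0] : 0x7fc00000;                     // else canonical qNaN
}

int main() {
  assert(unbox_f32(box_f32(0x3f800000)) == 0x3f800000);               // 1.0f survives boxing
  freg bad = { { 0x000000003f800000ull, ~UINT64_C(0) } };             // improperly boxed
  assert(unbox_f32(bad) == 0x7fc00000);                               // reads as canonical NaN
  return 0;
}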
This will fail if the native machine's float types are not IEEE */ +inline float to_f(float32_t f) { float r; memcpy(&r, &f, sizeof(r)); return r; } +inline double to_f(float64_t f) { double r; memcpy(&r, &f, sizeof(r)); return r; } +inline long double to_f(float128_t f) { long double r; memcpy(&r, &f, sizeof(r)); return r; } + +// Vector macros +#define e8 8 // 8b elements +#define e16 16 // 16b elements +#define e32 32 // 32b elements +#define e64 64 // 64b elements +#define e128 128 // 128b elements +#define e256 256 // 256b elements +#define e512 512 // 512b elements +#define e1024 1024 // 1024b elements + +#define vsext(x, sew) (((sreg_t)(x) << (64 - sew)) >> (64 - sew)) +#define vzext(x, sew) (((reg_t)(x) << (64 - sew)) >> (64 - sew)) + +#define DEBUG_RVV 0 + +#if DEBUG_RVV +#define DEBUG_RVV_FP_VV \ + printf("vfp(%lu) vd=%f vs1=%f vs2=%f\n", i, to_f(vd), to_f(vs1), to_f(vs2)); +#define DEBUG_RVV_FP_VF \ + printf("vfp(%lu) vd=%f vs1=%f vs2=%f\n", i, to_f(vd), to_f(rs1), to_f(vs2)); +#define DEBUG_RVV_FMA_VV \ + printf("vfma(%lu) vd=%f vs1=%f vs2=%f vd_old=%f\n", i, to_f(vd), to_f(vs1), to_f(vs2), to_f(vd_old)); +#define DEBUG_RVV_FMA_VF \ + printf("vfma(%lu) vd=%f vs1=%f vs2=%f vd_old=%f\n", i, to_f(vd), to_f(rs1), to_f(vs2), to_f(vd_old)); +#else +#define DEBUG_RVV_FP_VV 0 +#define DEBUG_RVV_FP_VF 0 +#define DEBUG_RVV_FMA_VV 0 +#define DEBUG_RVV_FMA_VF 0 +#endif + +// +// vector: masking skip helper +// +#define VI_MASK_VARS \ + const int midx = i / 64; \ + const int mpos = i % 64; + +#define VI_LOOP_ELEMENT_SKIP(BODY) \ + VI_MASK_VARS \ + if (insn.v_vm() == 0) { \ + BODY; \ + bool skip = ((P.VU.elt(0, midx) >> mpos) & 0x1) == 0; \ + if (skip) { \ + continue; \ + } \ + } + +#define VI_ELEMENT_SKIP(inx) \ + if (inx >= vl) { \ + continue; \ + } else if (inx < P.VU.vstart->read()) { \ + continue; \ + } else { \ + VI_LOOP_ELEMENT_SKIP(); \ + } + +// +// vector: operation and register acccess check helper +// +static inline bool is_overlapped(const int astart, int asize, + const int bstart, int bsize) +{ + asize = asize == 0 ? 1 : asize; + bsize = bsize == 0 ? 1 : bsize; + + const int aend = astart + asize; + const int bend = bstart + bsize; + + return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; +} + +static inline bool is_overlapped_widen(const int astart, int asize, + const int bstart, int bsize) +{ + asize = asize == 0 ? 1 : asize; + bsize = bsize == 0 ? 1 : bsize; + + const int aend = astart + asize; + const int bend = bstart + bsize; + + if (astart < bstart && + is_overlapped(astart, asize, bstart, bsize) && + !is_overlapped(astart, asize, bstart + bsize, bsize)) { + return false; + } else { + return std::max(aend, bend) - std::min(astart, bstart) < asize + bsize; + } +} + +static inline bool is_aligned(const unsigned val, const unsigned pos) +{ + return pos ? (val & (pos - 1)) == 0 : true; +} + +#define VI_NARROW_CHECK_COMMON \ + require_vector(true); \ + require(P.VU.vflmul <= 4); \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + require_align(insn.rs2(), P.VU.vflmul * 2); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_vm; \ + +#define VI_WIDE_CHECK_COMMON \ + require_vector(true); \ + require(P.VU.vflmul <= 4); \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + require_align(insn.rd(), P.VU.vflmul * 2); \ + require_vm; \ + +#define VI_CHECK_ST_INDEX(elt_width) \ + require_vector(false); \ + float vemul = ((float)elt_width / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + reg_t emul = vemul < 1 ? 
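Illustration (not vendored code): the vector helpers above rest on two small calculations: element i's mask bit lives at bit (i % 64) of 64-bit word (i / 64) of v0, and is_overlapped() treats two register groups as colliding when their index ranges meet. A minimal sketch of both; the function names are made up for the example.

#include <cassert>

static bool mask_bit(const unsigned long long *v0, int i) {
  return (v0[i / 64] >> (i % 64)) & 1;                // VI_MASK_VARS / VI_LOOP_ELEMENT_SKIP
}

static bool overlapped(int astart, int asize, int bstart, int bsize) {
  if (asize == 0) asize = 1;                          // a group occupies at least one register
  if (bsize == 0) bsize = 1;
  int aend = astart + asize, bend = bstart + bsize;
  return (aend > bend ? aend : bend) - (astart < bstart ? astart : bstart) < asize + bsize;
}

int main() {
  unsigned long long v0[2] = { 1ull << 63, 1 };       // elements 63 and 64 are active
  assert(mask_bit(v0, 63) && mask_bit(v0, 64) && !mask_bit(v0, 0));
  assert(overlapped(8, 4, 10, 2));                    // v8..v11 overlaps v10..v11
  assert(!overlapped(8, 4, 12, 4));                   // v8..v11 is disjoint from v12..v15
  return 0;
}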
1 : vemul; \ + reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), vemul); \ + require((nf * flmul) <= (NVPR / 4) && \ + (insn.rd() + nf * flmul) <= NVPR); \ + +#define VI_CHECK_LD_INDEX(elt_width) \ + VI_CHECK_ST_INDEX(elt_width); \ + for (reg_t idx = 0; idx < nf; ++idx) { \ + reg_t flmul = P.VU.vflmul < 1 ? 1 : P.VU.vflmul; \ + reg_t seg_vd = insn.rd() + flmul * idx; \ + if (elt_width > P.VU.vsew) { \ + if (seg_vd != insn.rs2()) \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } else if (elt_width < P.VU.vsew) { \ + if (vemul < 1) { \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } else { \ + require_noover_widen(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + if (nf >= 2) { \ + require_noover(seg_vd, P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + require_vm; \ + +#define VI_CHECK_MSS(is_vs1) \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), 1, insn.rs2(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (is_vs1) { \ + if (insn.rd() != insn.rs1()) \ + require_noover(insn.rd(), 1, insn.rs1(), P.VU.vflmul); \ + require_align(insn.rs1(), P.VU.vflmul); \ + } \ + +#define VI_CHECK_SSS(is_vs1) \ + require_vm; \ + if (P.VU.vflmul > 1) { \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (is_vs1) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_STORE(elt_width, is_mask_ldst) \ + require_vector(false); \ + reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8; \ + float vemul = is_mask_ldst ? 1 : ((float)veew / P.VU.vsew * P.VU.vflmul); \ + reg_t emul = vemul < 1 ? 1 : vemul; \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rd(), vemul); \ + require((nf * emul) <= (NVPR / 4) && \ + (insn.rd() + nf * emul) <= NVPR); \ + require(veew <= P.VU.ELEN); \ + +#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \ + VI_CHECK_STORE(elt_width, is_mask_ldst); \ + require_vm; \ + +#define VI_CHECK_DSS(is_vs1) \ + VI_WIDE_CHECK_COMMON; \ + require_align(insn.rs2(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs2(), P.VU.vflmul); \ + } \ + if (is_vs1) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_DDS(is_rs) \ + VI_WIDE_CHECK_COMMON; \ + require_align(insn.rs2(), P.VU.vflmul * 2); \ + if (is_rs) { \ + require_align(insn.rs1(), P.VU.vflmul); \ + if (P.VU.vflmul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul * 2, insn.rs1(), P.VU.vflmul); \ + } \ + } + +#define VI_CHECK_SDS(is_vs1) \ + VI_NARROW_CHECK_COMMON; \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul * 2); \ + if (is_vs1) \ + require_align(insn.rs1(), P.VU.vflmul); \ + +#define VI_CHECK_REDUCTION(is_wide) \ + require_vector(true); \ + if (is_wide) { \ + require(P.VU.vsew * 2 <= P.VU.ELEN); \ + } \ + require_align(insn.rs2(), P.VU.vflmul); \ + require(P.VU.vstart->read() == 0); \ + +#define VI_CHECK_SLIDE(is_over) \ + require_align(insn.rs2(), P.VU.vflmul); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_vm; \ + if (is_over) \ 
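Illustration (not vendored code): the VI_CHECK_* macros above repeatedly apply the same legality rule for indexed and strided accesses: the effective multiplier EMUL = EEW / SEW * LMUL must stay within [1/8, 8]. A small sketch of that rule with example widths.

#include <cassert>

static bool emul_legal(float eew, float sew, float lmul) {
  float vemul = eew / sew * lmul;                     // same formula as VI_CHECK_ST_INDEX
  return vemul >= 0.125f && vemul <= 8.0f;
}

int main() {
  assert(emul_legal(8, 32, 1));                       // EMUL = 1/4: fine
  assert(emul_legal(64, 8, 1));                       // EMUL = 8: at the upper bound
  assert(!emul_legal(64, 8, 2));                      // EMUL = 16: reserved, check fails
  return 0;
}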
+ require(insn.rd() != insn.rs2()); \ + + +// +// vector: loop header and end helper +// +#define VI_GENERAL_LOOP_BASE \ + require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + reg_t sew = P.VU.vsew; \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { + +#define VI_LOOP_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_LOOP_END \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_REDUCTION_END(x) \ + } \ + if (vl > 0) { \ + vd_0_des = vd_0_res; \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_CARRY_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_MASK_VARS \ + auto v0 = P.VU.elt(0, midx); \ + const uint64_t mmask = UINT64_C(1) << mpos; \ + const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ + uint64_t carry = insn.v_vm() == 0 ? (v0 >> mpos) & 0x1 : 0; \ + uint128_t res = 0; \ + auto &vd = P.VU.elt(rd_num, midx, true); + +#define VI_LOOP_CARRY_END \ + vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ + } \ + P.VU.vstart->write(0); +#define VI_LOOP_WITH_CARRY_BASE \ + VI_GENERAL_LOOP_BASE \ + VI_MASK_VARS \ + auto &v0 = P.VU.elt(0, midx); \ + const uint128_t op_mask = (UINT64_MAX >> (64 - sew)); \ + uint64_t carry = (v0 >> mpos) & 0x1; + +#define VI_LOOP_CMP_BASE \ + require(P.VU.vsew >= e8 && P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + reg_t sew = P.VU.vsew; \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t &vdi = P.VU.elt(insn.rd(), midx, true); \ + uint64_t res = 0; + +#define VI_LOOP_CMP_END \ + vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_MASK(op) \ + require(P.VU.vsew <= e64); \ + require_vector(true); \ + reg_t vl = P.VU.vl->read(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + int midx = i / 64; \ + int mpos = i % 64; \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t vs2 = P.VU.elt(insn.rs2(), midx); \ + uint64_t vs1 = P.VU.elt(insn.rs1(), midx); \ + uint64_t &res = P.VU.elt(insn.rd(), midx, true); \ + res = (res & ~mmask) | ((op) & (1ULL << mpos)); \ + } \ + P.VU.vstart->write(0); + +#define VI_LOOP_NSHIFT_BASE \ + VI_GENERAL_LOOP_BASE; \ + VI_LOOP_ELEMENT_SKIP({ \ + require(!(insn.rd() == 0 && P.VU.vflmul > 1)); \ + }); + + +#define INT_ROUNDING(result, xrm, gb) \ + do { \ + const uint64_t lsb = 1UL << (gb); \ + const uint64_t lsb_half = lsb >> 1; \ + switch (xrm) { \ + case VRM::RNU: \ + result += lsb_half; \ + break; \ + case VRM::RNE: \ + if ((result & lsb_half) && ((result & (lsb_half - 1)) || (result & lsb))) \ + result += lsb; \ + break; \ + case VRM::RDN: \ + break; \ + case VRM::ROD: \ + if (result & (lsb - 1)) \ + result |= lsb; \ + break; \ + case VRM::INVALID_RM: \ + assert(true); \ + } \ + } while (0) + +// +// vector: integer and masking operand access helper +// +#define VXI_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); + +#define VV_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + 
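Illustration (not vendored code): INT_ROUNDING() applies the four vxrm fixed-point rounding modes to the guard bits that are about to be shifted out. A self-contained sketch with a single guard bit; unlike the macro, the final shift that its callers perform is folded into the helper here.

#include <cstdint>
#include <cassert>

enum vxrm { RNU, RNE, RDN, ROD };

static uint64_t round_off(uint64_t result, vxrm mode, unsigned gb) {
  const uint64_t lsb = UINT64_C(1) << gb, half = lsb >> 1;
  switch (mode) {
    case RNU: result += half; break;                                    // round to nearest, up
    case RNE: if ((result & half) && ((result & (half - 1)) || (result & lsb)))
                result += lsb;                                          // nearest, ties to even
              break;
    case RDN: break;                                                    // truncate
    case ROD: if (result & (lsb - 1)) result |= lsb; break;             // round to odd (jam)
  }
  return result >> gb;                                                  // drop the guard bits
}

int main() {
  assert(round_off(0b101, RNU, 1) == 0b11);    // 2.5 -> 3
  assert(round_off(0b101, RNE, 1) == 0b10);    // 2.5 -> 2 (even)
  assert(round_off(0b111, RNE, 1) == 0b100);   // 3.5 -> 4 (even)
  assert(round_off(0b101, RDN, 1) == 0b10);    // 2.5 -> 2
  assert(round_off(0b101, ROD, 1) == 0b11);    // 2.5 -> 3 (forced odd)
  return 0;
}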
type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_U_PARAMS(x) \ + type_usew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type zimm5 = (type_usew_t::type)insn.v_zimm5(); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define XV_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, RS1); + +#define VV_SU_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_SU_PARAMS(x) \ + type_sew_t::type &vd = P.VU.elt::type>(rd_num, i, true); \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_UCMP_PARAMS(x) \ + type_usew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_UCMP_PARAMS(x) \ + type_usew_t::type rs1 = (type_usew_t::type)RS1; \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_UCMP_PARAMS(x) \ + type_usew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VV_CMP_PARAMS(x) \ + type_sew_t::type vs1 = P.VU.elt::type>(rs1_num, i); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VX_CMP_PARAMS(x) \ + type_sew_t::type rs1 = (type_sew_t::type)RS1; \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_CMP_PARAMS(x) \ + type_sew_t::type simm5 = (type_sew_t::type)insn.v_simm5(); \ + type_sew_t::type vs2 = P.VU.elt::type>(rs2_num, i); + +#define VI_XI_SLIDEDOWN_PARAMS(x, off) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2 = P.VU.elt::type>(rs2_num, i + off); + +#define VI_XI_SLIDEUP_PARAMS(x, offset) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2 = P.VU.elt::type>(rs2_num, i - offset); + +#define VI_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto zimm5 = (type_usew_t::type)insn.v_zimm5(); + +#define VX_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; + +#define VV_NARROW_PARAMS(sew1, sew2) \ + auto &vd = P.VU.elt::type>(rd_num, i, true); \ + auto vs2_u = P.VU.elt::type>(rs2_num, i); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); + +#define XI_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; \ + auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + +#define VV_CARRY_PARAMS(x) \ + auto vs2 = 
P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); \ + +#define XI_WITH_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto rs1 = (type_sew_t::type)RS1; \ + auto simm5 = (type_sew_t::type)insn.v_simm5(); \ + auto &vd = P.VU.elt::type>(rd_num, i, true); + +#define VV_WITH_CARRY_PARAMS(x) \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + auto vs1 = P.VU.elt::type>(rs1_num, i); \ + auto &vd = P.VU.elt::type>(rd_num, i, true); + +#define VFP_V_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define VFP_VV_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t vs1 = P.VU.elt(rs1_num, i); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define VFP_VF_PARAMS(width) \ + float##width##_t &vd = P.VU.elt(rd_num, i, true); \ + float##width##_t rs1 = f##width(READ_FREG(rs1_num)); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); + +#define CVT_FP_TO_FP_PARAMS(from_width, to_width) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +#define CVT_INT_TO_FP_PARAMS(from_width, to_width, sign) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +#define CVT_FP_TO_INT_PARAMS(from_width, to_width, sign) \ + auto vs2 = P.VU.elt(rs2_num, i); \ + auto &vd = P.VU.elt(rd_num, i, true); + +// +// vector: integer and masking operation loop +// + +#define INSNS_BASE(PARAMS, BODY) \ + if (sew == e8) { \ + PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + PARAMS(e64); \ + BODY; \ + } + +// comparision result to masking register +#define VI_LOOP_CMP_BODY(PARAMS, BODY) \ + VI_LOOP_CMP_BASE \ + INSNS_BASE(PARAMS, BODY) \ + VI_LOOP_CMP_END + +#define VI_VV_LOOP_CMP(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CMP_BODY(VV_CMP_PARAMS, BODY) + +#define VI_VX_LOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VX_CMP_PARAMS, BODY) + +#define VI_VI_LOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VI_CMP_PARAMS, BODY) + +#define VI_VV_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CMP_BODY(VV_UCMP_PARAMS, BODY) + +#define VI_VX_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VX_UCMP_PARAMS, BODY) + +#define VI_VI_ULOOP_CMP(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CMP_BODY(VI_UCMP_PARAMS, BODY) + +// merge and copy loop +#define VI_MERGE_VARS \ + VI_MASK_VARS \ + bool use_first = (P.VU.elt(0, midx) >> mpos) & 0x1; + +#define VI_MERGE_LOOP_BASE \ + require_vector(true); \ + VI_GENERAL_LOOP_BASE \ + VI_MERGE_VARS + +#define VI_VV_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(true); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_MERGE_LOOP_BASE \ + if (sew == e8) { \ + VI_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ 
+ VI_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VF_MERGE_LOOP(BODY) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_MERGE_VARS \ + if (P.VU.vsew == e16) { \ + VFP_VF_PARAMS(16); \ + BODY; \ + } else if (P.VU.vsew == e32) { \ + VFP_VF_PARAMS(32); \ + BODY; \ + } else if (P.VU.vsew == e64) { \ + VFP_VF_PARAMS(64); \ + BODY; \ + } \ + VI_LOOP_END + +// reduction loop - signed +#define VI_LOOP_REDUCTION_BASE(x) \ + require(x >= e8 && x <= e64); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); \ + +#define REDUCTION_LOOP(x, BODY) \ + VI_LOOP_REDUCTION_BASE(x) \ + BODY; \ + VI_LOOP_REDUCTION_END(x) + +#define VI_VV_LOOP_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(false); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + REDUCTION_LOOP(e8, BODY) \ + } else if (sew == e16) { \ + REDUCTION_LOOP(e16, BODY) \ + } else if (sew == e32) { \ + REDUCTION_LOOP(e32, BODY) \ + } else if (sew == e64) { \ + REDUCTION_LOOP(e64, BODY) \ + } + +// reduction loop - unsigned +#define VI_ULOOP_REDUCTION_BASE(x) \ + require(x >= e8 && x <= e64); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define REDUCTION_ULOOP(x, BODY) \ + VI_ULOOP_REDUCTION_BASE(x) \ + BODY; \ + VI_LOOP_REDUCTION_END(x) + +#define VI_VV_ULOOP_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(false); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + REDUCTION_ULOOP(e8, BODY) \ + } else if (sew == e16) { \ + REDUCTION_ULOOP(e16, BODY) \ + } else if (sew == e32) { \ + REDUCTION_ULOOP(e32, BODY) \ + } else if (sew == e64) { \ + REDUCTION_ULOOP(e64, BODY) \ + } + + +// genearl VXI signed/unsigned loop +#define VI_VV_ULOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VV_LOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_ULOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + 
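Illustration (simplified sketch, not vendored code): once the check, loop-base and operand macros are expanded, a use such as VI_VV_LOOP({ vd = vs1 + vs2; }) reduces, for each SEW, to an element loop like the one below. The sketch drops masking and assumes vstart is zero.

#include <cstdint>
#include <cstddef>

static void vadd_vv_e32(int32_t *vd, const int32_t *vs1, const int32_t *vs2, size_t vl) {
  for (size_t i = 0; i < vl; ++i)     // VI_GENERAL_LOOP_BASE iterates vstart .. vl-1
    vd[i] = vs1[i] + vs2[i];          // BODY with the VV_PARAMS(e32) operands
  // VI_LOOP_END then clears vstart
}

int main() {
  int32_t a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, d[4] = {0};
  vadd_vv_e32(d, a, b, 4);
  return d[3] == 44 ? 0 : 1;
}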
VX_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_ULOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_U_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_U_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_U_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_U_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VI_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VI_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VI_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +// signed unsigned operation loop (e.g. mulhsu) +#define VI_VV_SU_LOOP(BODY) \ + VI_CHECK_SSS(true) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_SU_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_SU_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_SU_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VV_SU_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_SU_LOOP(BODY) \ + VI_CHECK_SSS(false) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_SU_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_SU_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_SU_PARAMS(e32); \ + BODY; \ + } else if (sew == e64) { \ + VX_SU_PARAMS(e64); \ + BODY; \ + } \ + VI_LOOP_END + +// narrow operation loop +#define VI_VV_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(true); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VV_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VV_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VX_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VX_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VX_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP_NARROW(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VI_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VI_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VI_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VI_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VI_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VI_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VI_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(false); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VX_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VX_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VX_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VV_LOOP_NSHIFT(BODY) \ + VI_CHECK_SDS(true); \ + VI_LOOP_NSHIFT_BASE \ + if (sew == e8) { \ + VV_NARROW_PARAMS(e8, e16) \ + BODY; \ + } else if (sew == e16) { \ + VV_NARROW_PARAMS(e16, e32) \ + BODY; \ + } else if (sew == e32) { \ + VV_NARROW_PARAMS(e32, e64) \ + BODY; \ + } \ + VI_LOOP_END + +// widen operation loop +#define VI_VV_LOOP_WIDEN(BODY) \ + VI_LOOP_BASE \ + if (sew == e8) { \ + VV_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VV_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VV_PARAMS(e32); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_VX_LOOP_WIDEN(BODY) \ 
+ VI_LOOP_BASE \ + if (sew == e8) { \ + VX_PARAMS(e8); \ + BODY; \ + } else if (sew == e16) { \ + VX_PARAMS(e16); \ + BODY; \ + } else if (sew == e32) { \ + VX_PARAMS(e32); \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_WIDE_OP_AND_ASSIGN(var0, var1, var2, op0, op1, sign) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign##16_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##16_t)(sign##8_t)var0 op0 (sign##16_t)(sign##8_t)var1) + var2; \ + } \ + break; \ + case e16: { \ + sign##32_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##32_t)(sign##16_t)var0 op0 (sign##32_t)(sign##16_t)var1) + var2; \ + } \ + break; \ + default: { \ + sign##64_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign##64_t)(sign##32_t)var0 op0 (sign##64_t)(sign##32_t)var1) + var2; \ + } \ + break; \ + } + +#define VI_WIDE_OP_AND_ASSIGN_MIX(var0, var1, var2, op0, op1, sign_d, sign_1, sign_2) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign_d##16_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##16_t)(sign_1##8_t)var0 op0 (sign_2##16_t)(sign_2##8_t)var1) + var2; \ + } \ + break; \ + case e16: { \ + sign_d##32_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##32_t)(sign_1##16_t)var0 op0 (sign_2##32_t)(sign_2##16_t)var1) + var2; \ + } \ + break; \ + default: { \ + sign_d##64_t vd_w = P.VU.elt(rd_num, i); \ + P.VU.elt(rd_num, i, true) = \ + op1((sign_1##64_t)(sign_1##32_t)var0 op0 (sign_2##64_t)(sign_2##32_t)var1) + var2; \ + } \ + break; \ + } + +#define VI_WIDE_WVX_OP(var0, op0, sign) \ + switch (P.VU.vsew) { \ + case e8: { \ + sign##16_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##16_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##16_t)(sign##8_t)var0; \ + } \ + break; \ + case e16: { \ + sign##32_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##32_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##32_t)(sign##16_t)var0; \ + } \ + break; \ + default: { \ + sign##64_t &vd_w = P.VU.elt(rd_num, i, true); \ + sign##64_t vs2_w = P.VU.elt(rs2_num, i); \ + vd_w = vs2_w op0 (sign##64_t)(sign##32_t)var0; \ + } \ + break; \ + } + +// wide reduction loop - signed +#define VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define WIDE_REDUCTION_LOOP(sew1, sew2, BODY) \ + VI_LOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + BODY; \ + VI_LOOP_REDUCTION_END(sew2) + +#define VI_VV_LOOP_WIDE_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(true); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + WIDE_REDUCTION_LOOP(e8, e16, BODY) \ + } else if (sew == e16) { \ + WIDE_REDUCTION_LOOP(e16, e32, BODY) \ + } else if (sew == e32) { \ + WIDE_REDUCTION_LOOP(e32, e64, BODY) \ + } + +// wide reduction loop - unsigned +#define VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + auto &vd_0_des = P.VU.elt::type>(rd_num, 0, true); \ + auto vd_0_res = P.VU.elt::type>(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + auto vs2 = P.VU.elt::type>(rs2_num, i); + +#define WIDE_REDUCTION_ULOOP(sew1, sew2, BODY) \ + 
VI_ULOOP_WIDE_REDUCTION_BASE(sew1, sew2) \ + BODY; \ + VI_LOOP_REDUCTION_END(sew2) + +#define VI_VV_ULOOP_WIDE_REDUCTION(BODY) \ + VI_CHECK_REDUCTION(true); \ + reg_t sew = P.VU.vsew; \ + if (sew == e8) { \ + WIDE_REDUCTION_ULOOP(e8, e16, BODY) \ + } else if (sew == e16) { \ + WIDE_REDUCTION_ULOOP(e16, e32, BODY) \ + } else if (sew == e32) { \ + WIDE_REDUCTION_ULOOP(e32, e64, BODY) \ + } + +// carry/borrow bit loop +#define VI_VV_LOOP_CARRY(BODY) \ + VI_CHECK_MSS(true); \ + VI_LOOP_CARRY_BASE \ + if (sew == e8) { \ + VV_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + VV_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + VV_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + VV_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_CARRY_END + +#define VI_XI_LOOP_CARRY(BODY) \ + VI_CHECK_MSS(false); \ + VI_LOOP_CARRY_BASE \ + if (sew == e8) { \ + XI_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + XI_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + XI_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + XI_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_CARRY_END + +#define VI_VV_LOOP_WITH_CARRY(BODY) \ + require_vm; \ + VI_CHECK_SSS(true); \ + VI_LOOP_WITH_CARRY_BASE \ + if (sew == e8) { \ + VV_WITH_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + VV_WITH_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + VV_WITH_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + VV_WITH_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_END + +#define VI_XI_LOOP_WITH_CARRY(BODY) \ + require_vm; \ + VI_CHECK_SSS(false); \ + VI_LOOP_WITH_CARRY_BASE \ + if (sew == e8) { \ + XI_WITH_CARRY_PARAMS(e8) \ + BODY; \ + } else if (sew == e16) { \ + XI_WITH_CARRY_PARAMS(e16) \ + BODY; \ + } else if (sew == e32) { \ + XI_WITH_CARRY_PARAMS(e32) \ + BODY; \ + } else if (sew == e64) { \ + XI_WITH_CARRY_PARAMS(e64) \ + BODY; \ + } \ + VI_LOOP_END + +// average loop +#define VI_VV_LOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VV_LOOP({ \ + uint128_t res = ((uint128_t)vs2) op vs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VX_LOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VX_LOOP({ \ + uint128_t res = ((uint128_t)vs2) op rs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VV_ULOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VV_ULOOP({ \ + uint128_t res = ((uint128_t)vs2) op vs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +#define VI_VX_ULOOP_AVG(op) \ +VRM xrm = p->VU.get_vround_mode(); \ +VI_VX_ULOOP({ \ + uint128_t res = ((uint128_t)vs2) op rs1; \ + INT_ROUNDING(res, xrm, 1); \ + vd = res >> 1; \ +}) + +// +// vector: load/store helper +// +#define VI_STRIP(inx) \ + reg_t vreg_inx = inx; + +#define VI_DUPLICATE_VREG(reg_num, idx_sew) \ +reg_t index[P.VU.vlmax]; \ + for (reg_t i = 0; i < P.VU.vlmax && P.VU.vl->read() != 0; ++i) { \ + switch (idx_sew) { \ + case e8: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e16: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e32: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + case e64: \ + index[i] = P.VU.elt(reg_num, i); \ + break; \ + } \ +} + +#define VI_LD(stride, offset, elt_width, is_mask_ldst) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = is_mask_ldst ? 
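Illustration (not vendored code): the VI_*_LOOP_AVG macros above form the sum in a wider type, round the single guard bit per vxrm, and then halve, so averaging adds never overflow the element type. A minimal sketch at SEW=8 with the rounding mode fixed to round-to-nearest-up; the wider type is int64_t here rather than the macro's uint128_t.

#include <cstdint>
#include <cassert>

static int8_t vaadd_e8_rnu(int8_t a, int8_t b) {
  int64_t res = (int64_t)a + b;        // widen before adding
  res += 1;                            // INT_ROUNDING(res, RNU, 1): add half of 1 << 1
  return (int8_t)(res >> 1);           // vd = res >> 1
}

int main() {
  assert(vaadd_e8_rnu(127, 127) == 127);   // no intermediate overflow
  assert(vaadd_e8_rnu(3, 4) == 4);         // 3.5 rounds up under RNU
  return 0;
}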
((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + VI_CHECK_LOAD(elt_width, is_mask_ldst); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + elt_width##_t val = MMU.load_##elt_width( \ + baseAddr + (stride) + (offset) * sizeof(elt_width##_t)); \ + P.VU.elt(vd + fn * emul, vreg_inx, true) = val; \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_LD_INDEX(elt_width, is_seg) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + if (!is_seg) \ + require(nf == 1); \ + VI_CHECK_LD_INDEX(elt_width); \ + VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + switch (P.VU.vsew) { \ + case e8: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint8(baseAddr + index[i] + fn * 1); \ + break; \ + case e16: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint16(baseAddr + index[i] + fn * 2); \ + break; \ + case e32: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint32(baseAddr + index[i] + fn * 4); \ + break; \ + default: \ + P.VU.elt(vd + fn * flmul, vreg_inx, true) = \ + MMU.load_uint64(baseAddr + index[i] + fn * 8); \ + break; \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST(stride, offset, elt_width, is_mask_ldst) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = is_mask_ldst ? ((P.VU.vl->read() + 7) / 8) : P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + VI_CHECK_STORE(elt_width, is_mask_ldst); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_STRIP(i) \ + VI_ELEMENT_SKIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + elt_width##_t val = P.VU.elt(vs3 + fn * emul, vreg_inx); \ + MMU.store_##elt_width( \ + baseAddr + (stride) + (offset) * sizeof(elt_width##_t), val); \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST_INDEX(elt_width, is_seg) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + if (!is_seg) \ + require(nf == 1); \ + VI_CHECK_ST_INDEX(elt_width); \ + VI_DUPLICATE_VREG(insn.rs2(), elt_width); \ + for (reg_t i = 0; i < vl; ++i) { \ + VI_STRIP(i) \ + VI_ELEMENT_SKIP(i); \ + P.VU.vstart->write(i); \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + switch (P.VU.vsew) { \ + case e8: \ + MMU.store_uint8(baseAddr + index[i] + fn * 1, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + case e16: \ + MMU.store_uint16(baseAddr + index[i] + fn * 2, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + case e32: \ + MMU.store_uint32(baseAddr + index[i] + fn * 4, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + default: \ + MMU.store_uint64(baseAddr + index[i] + fn * 8, \ + P.VU.elt(vs3 + fn * flmul, vreg_inx)); \ + break; \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_LDST_FF(elt_width) \ + const reg_t nf = insn.v_nf() + 1; \ + const reg_t sew = p->VU.vsew; \ + const reg_t vl = p->VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t rd_num = insn.rd(); \ + VI_CHECK_LOAD(elt_width, false); \ + bool early_stop = false; \ + for (reg_t i = p->VU.vstart->read(); i < vl; ++i) { \ + VI_STRIP(i); \ + VI_ELEMENT_SKIP(i); \ + \ + for (reg_t fn = 0; fn < nf; ++fn) { \ + uint64_t 
val; \ + try { \ + val = MMU.load_##elt_width( \ + baseAddr + (i * nf + fn) * sizeof(elt_width##_t)); \ + } catch (trap_t& t) { \ + if (i == 0) \ + throw; /* Only take exception on zeroth element */ \ + /* Reduce VL if an exception occurs on a later element */ \ + early_stop = true; \ + P.VU.vl->write_raw(i); \ + break; \ + } \ + p->VU.elt(rd_num + fn * emul, vreg_inx, true) = val; \ + } \ + \ + if (early_stop) { \ + break; \ + } \ + } \ + p->VU.vstart->write(0); + +#define VI_LD_WHOLE(elt_width) \ + require_vector_novtype(true, false); \ + require(sizeof(elt_width ## _t) * 8 <= P.VU.ELEN); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + const reg_t len = insn.v_nf() + 1; \ + require_align(vd, len); \ + const reg_t elt_per_reg = P.VU.vlenb / sizeof(elt_width ## _t); \ + const reg_t size = len * elt_per_reg; \ + if (P.VU.vstart->read() < size) { \ + reg_t i = P.VU.vstart->read() / elt_per_reg; \ + reg_t off = P.VU.vstart->read() % elt_per_reg; \ + if (off) { \ + for (reg_t pos = off; pos < elt_per_reg; ++pos) { \ + auto val = MMU.load_## elt_width(baseAddr + \ + P.VU.vstart->read() * sizeof(elt_width ## _t)); \ + P.VU.elt(vd + i, pos, true) = val; \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + ++i; \ + } \ + for (; i < len; ++i) { \ + for (reg_t pos = 0; pos < elt_per_reg; ++pos) { \ + auto val = MMU.load_## elt_width(baseAddr + \ + P.VU.vstart->read() * sizeof(elt_width ## _t)); \ + P.VU.elt(vd + i, pos, true) = val; \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +#define VI_ST_WHOLE \ + require_vector_novtype(true, false); \ + const reg_t baseAddr = RS1; \ + const reg_t vs3 = insn.rd(); \ + const reg_t len = insn.v_nf() + 1; \ + require_align(vs3, len); \ + const reg_t size = len * P.VU.vlenb; \ + \ + if (P.VU.vstart->read() < size) { \ + reg_t i = P.VU.vstart->read() / P.VU.vlenb; \ + reg_t off = P.VU.vstart->read() % P.VU.vlenb; \ + if (off) { \ + for (reg_t pos = off; pos < P.VU.vlenb; ++pos) { \ + auto val = P.VU.elt(vs3 + i, pos); \ + MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + i++; \ + } \ + for (; i < len; ++i) { \ + for (reg_t pos = 0; pos < P.VU.vlenb; ++pos) { \ + auto val = P.VU.elt(vs3 + i, pos); \ + MMU.store_uint8(baseAddr + P.VU.vstart->read(), val); \ + P.VU.vstart->write(P.VU.vstart->read() + 1); \ + } \ + } \ + } \ + P.VU.vstart->write(0); + +// +// vector: amo +// +#define VI_AMO(op, type, idx_type) \ + require_vector(false); \ + require_align(insn.rd(), P.VU.vflmul); \ + require(P.VU.vsew <= P.get_xlen() && P.VU.vsew >= 32); \ + require_align(insn.rd(), P.VU.vflmul); \ + float vemul = ((float)idx_type / P.VU.vsew * P.VU.vflmul); \ + require(vemul >= 0.125 && vemul <= 8); \ + require_align(insn.rs2(), vemul); \ + if (insn.v_wd()) { \ + require_vm; \ + if (idx_type > P.VU.vsew) { \ + if (insn.rd() != insn.rs2()) \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else if (idx_type < P.VU.vsew) { \ + if (vemul < 1) { \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), vemul); \ + } \ + } \ + } \ + VI_DUPLICATE_VREG(insn.rs2(), idx_type); \ + const reg_t vl = P.VU.vl->read(); \ + const reg_t baseAddr = RS1; \ + const reg_t vd = insn.rd(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_ELEMENT_SKIP(i); \ + VI_STRIP(i); \ + P.VU.vstart->write(i); \ + switch (P.VU.vsew) { \ + case e32: { \ + auto vs3 = P.VU.elt< 
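Illustration (not vendored code): VI_LDST_FF above implements fault-only-first semantics: a trap on element 0 is delivered normally, while a trap on any later element is suppressed and merely shortens vl. A minimal sketch of that policy, with a made-up load callback standing in for the MMU access.

#include <cstddef>
#include <stdexcept>

template <typename Load>
static size_t load_ff(size_t vl, Load load_elem) {    // load_elem(i) may throw
  for (size_t i = 0; i < vl; ++i) {
    try {
      load_elem(i);
    } catch (...) {
      if (i == 0) throw;                               // element 0: deliver the trap
      return i;                                        // otherwise: the new, shorter vl
    }
  }
  return vl;
}

int main() {
  auto faults_at_5 = [](size_t i) { if (i == 5) throw std::runtime_error("page fault"); };
  return load_ff(8, faults_at_5) == 5 ? 0 : 1;         // vl is cut down to 5
}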
type ## 32_t>(vd, vreg_inx); \ + auto val = MMU.amo_uint32(baseAddr + index[i], [&](type ## 32_t lhs) { op }); \ + if (insn.v_wd()) \ + P.VU.elt< type ## 32_t>(vd, vreg_inx, true) = val; \ + } \ + break; \ + case e64: { \ + auto vs3 = P.VU.elt< type ## 64_t>(vd, vreg_inx); \ + auto val = MMU.amo_uint64(baseAddr + index[i], [&](type ## 64_t lhs) { op }); \ + if (insn.v_wd()) \ + P.VU.elt< type ## 64_t>(vd, vreg_inx, true) = val; \ + } \ + break; \ + default: \ + require(0); \ + break; \ + } \ + } \ + P.VU.vstart->write(0); + +// vector: sign/unsiged extension +#define VI_VV_EXT(div, type) \ + require(insn.rd() != insn.rs2()); \ + require_vm; \ + reg_t from = P.VU.vsew / div; \ + require(from >= e8 && from <= e64); \ + require(((float)P.VU.vflmul / div) >= 0.125 && ((float)P.VU.vflmul / div) <= 8 ); \ + require_align(insn.rd(), P.VU.vflmul); \ + require_align(insn.rs2(), P.VU.vflmul / div); \ + if ((P.VU.vflmul / div) < 1) { \ + require_noover(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ + } else { \ + require_noover_widen(insn.rd(), P.VU.vflmul, insn.rs2(), P.VU.vflmul / div); \ + } \ + reg_t pat = (((P.VU.vsew >> 3) << 4) | from >> 3); \ + VI_GENERAL_LOOP_BASE \ + VI_LOOP_ELEMENT_SKIP(); \ + switch (pat) { \ + case 0x21: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x41: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x81: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x42: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x82: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x84: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + case 0x88: \ + P.VU.elt(rd_num, i, true) = P.VU.elt(rs2_num, i); \ + break; \ + default: \ + break; \ + } \ + VI_LOOP_END + +// +// vector: vfp helper +// +#define VI_VFP_COMMON \ + require_fp; \ + require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || \ + (P.VU.vsew == e32 && p->extension_enabled('F')) || \ + (P.VU.vsew == e64 && p->extension_enabled('D'))); \ + require_vector(true); \ + require(STATE.frm->read() < 0x5); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + softfloat_roundingMode = STATE.frm->read(); + +#define VI_VFP_LOOP_BASE \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_LOOP_CMP_BASE \ + VI_VFP_COMMON \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + uint64_t mmask = UINT64_C(1) << mpos; \ + uint64_t &vd = P.VU.elt(rd_num, midx, true); \ + uint64_t res = 0; + +#define VI_VFP_LOOP_REDUCTION_BASE(width) \ + float##width##_t vd_0 = P.VU.elt(rd_num, 0); \ + float##width##_t vs1_0 = P.VU.elt(rs1_num, 0); \ + vd_0 = vs1_0; \ + bool is_active = false; \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + float##width##_t vs2 = P.VU.elt(rs2_num, i); \ + is_active = true; \ + +#define VI_VFP_LOOP_WIDE_REDUCTION_BASE \ + VI_VFP_COMMON \ + float64_t vd_0 = f64(P.VU.elt(rs1_num, 0).v); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_LOOP_END \ + } \ + P.VU.vstart->write(0); \ + +#define VI_VFP_LOOP_REDUCTION_END(x) \ + } \ + P.VU.vstart->write(0); \ + if (vl > 0) { \ + if (is_propagate && !is_active) { \ + switch (x) { \ + case e16: { \ + auto ret = f16_classify(f16(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 
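Illustration (not vendored code): VI_VV_EXT above dispatches on a `pat` value that packs the destination and source element widths, in bytes, into one byte, so 0x21 means a 2-byte destination filled from a 1-byte source (e.g. vsext.vf2 at SEW=16). A small sketch of that encoding.

#include <cassert>

static unsigned ext_pat(unsigned vsew_bits, unsigned div) {
  unsigned from_bits = vsew_bits / div;                   // source element width
  return ((vsew_bits >> 3) << 4) | (from_bits >> 3);      // dest bytes | source bytes
}

int main() {
  assert(ext_pat(16, 2) == 0x21);   // e16 <- e8
  assert(ext_pat(64, 8) == 0x81);   // e64 <- e8
  assert(ext_pat(64, 2) == 0x84);   // e64 <- e32
  return 0;
}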
0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt(rd_num, 0, true) = defaultNaNF16UI; \ + } else { \ + P.VU.elt(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + case e32: { \ + auto ret = f32_classify(f32(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt(rd_num, 0, true) = defaultNaNF32UI; \ + } else { \ + P.VU.elt(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + case e64: { \ + auto ret = f64_classify(f64(vd_0.v)); \ + if (ret & 0x300) { \ + if (ret & 0x100) { \ + softfloat_exceptionFlags |= softfloat_flag_invalid; \ + set_fp_exceptions; \ + } \ + P.VU.elt(rd_num, 0, true) = defaultNaNF64UI; \ + } else { \ + P.VU.elt(rd_num, 0, true) = vd_0.v; \ + } \ + } \ + break; \ + } \ + } else { \ + P.VU.elt::type>(rd_num, 0, true) = vd_0.v; \ + } \ + } + +#define VI_VFP_LOOP_CMP_END \ + switch (P.VU.vsew) { \ + case e16: \ + case e32: \ + case e64: { \ + vd = (vd & ~mmask) | (((res) << mpos) & mmask); \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + } \ + P.VU.vstart->write(0); + +#define VI_VFP_VV_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VV_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VV_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VV_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_V_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_V_PARAMS(16); \ + BODY16; \ + break; \ + } \ + case e32: { \ + VFP_V_PARAMS(32); \ + BODY32; \ + break; \ + } \ + case e64: { \ + VFP_V_PARAMS(64); \ + BODY64; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + set_fp_exceptions; \ + VI_VFP_LOOP_END + +#define VI_VFP_VV_LOOP_REDUCTION(BODY16, BODY32, BODY64) \ + VI_CHECK_REDUCTION(false) \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: { \ + VI_VFP_LOOP_REDUCTION_BASE(16) \ + BODY16; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e16) \ + break; \ + } \ + case e32: { \ + VI_VFP_LOOP_REDUCTION_BASE(32) \ + BODY32; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e32) \ + break; \ + } \ + case e64: { \ + VI_VFP_LOOP_REDUCTION_BASE(64) \ + BODY64; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e64) \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + +#define VI_VFP_VV_LOOP_WIDE_REDUCTION(BODY16, BODY32) \ + VI_CHECK_REDUCTION(true) \ + VI_VFP_COMMON \ + require((P.VU.vsew == e16 && p->extension_enabled('F')) || \ + (P.VU.vsew == e32 && p->extension_enabled('D'))); \ + bool is_active = false; \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t vd_0 = P.VU.elt(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + is_active = true; \ + float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e32) \ + break; \ + } \ + case e32: { \ + float64_t vd_0 = P.VU.elt(rs1_num, 0); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); \ + is_active = true; \ + float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + VI_VFP_LOOP_REDUCTION_END(e64) \ + break; \ + } \ + 
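Illustration (not vendored code, based on the RISC-V fclass encoding): the `ret & 0x300` and `ret & 0x100` tests above inspect the classify mask, in which bit 8 marks a signaling NaN and bit 9 a quiet NaN, so a NaN result of an all-inactive reduction is replaced by the canonical NaN and only a signaling NaN additionally raises the invalid flag. A minimal sketch of the two predicates; the example mask values stand in for fNN_classify() results.

#include <cstdint>

static bool is_nan(uint16_t fclass_mask)       { return fclass_mask & 0x300; }
static bool is_signaling(uint16_t fclass_mask) { return fclass_mask & 0x100; }

int main() {
  uint16_t snan = 1u << 8, qnan = 1u << 9, pos_normal = 1u << 6;
  return (is_nan(snan) && is_signaling(snan) &&
          is_nan(qnan) && !is_signaling(qnan) &&
          !is_nan(pos_normal)) ? 0 : 1;
}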
default: \ + require(0); \ + break; \ + }; \ + +#define VI_VFP_VF_LOOP(BODY16, BODY32, BODY64) \ + VI_CHECK_SSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VF_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VF_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VF_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VF; \ + VI_VFP_LOOP_END + +#define VI_VFP_VV_LOOP_CMP(BODY16, BODY32, BODY64) \ + VI_CHECK_MSS(true); \ + VI_VFP_LOOP_CMP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VV_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VV_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VV_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + VI_VFP_LOOP_CMP_END \ + +#define VI_VFP_VF_LOOP_CMP(BODY16, BODY32, BODY64) \ + VI_CHECK_MSS(false); \ + VI_VFP_LOOP_CMP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + VFP_VF_PARAMS(16); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + VFP_VF_PARAMS(32); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + case e64: { \ + VFP_VF_PARAMS(64); \ + BODY64; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + VI_VFP_LOOP_CMP_END \ + +#define VI_VFP_VF_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DSS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ + float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ + float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + + +#define VI_VFP_VV_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DSS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = f16_to_f32(P.VU.elt(rs2_num, i)); \ + float32_t vs1 = f16_to_f32(P.VU.elt(rs1_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = f32_to_f64(P.VU.elt(rs2_num, i)); \ + float64_t vs1 = f32_to_f64(P.VU.elt(rs1_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + break; \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_WF_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DDS(false); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = P.VU.elt(rs2_num, i); \ + float32_t rs1 = f16_to_f32(f16(READ_FREG(rs1_num))); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = P.VU.elt(rs2_num, i); \ + float64_t rs1 = f32_to_f64(f32(READ_FREG(rs1_num))); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_WV_LOOP_WIDE(BODY16, BODY32) \ + VI_CHECK_DDS(true); \ + VI_VFP_LOOP_BASE \ + switch (P.VU.vsew) { \ + case e16: { \ + 
float32_t &vd = P.VU.elt(rd_num, i, true); \ + float32_t vs2 = P.VU.elt(rs2_num, i); \ + float32_t vs1 = f16_to_f32(P.VU.elt(rs1_num, i)); \ + BODY16; \ + set_fp_exceptions; \ + break; \ + } \ + case e32: { \ + float64_t &vd = P.VU.elt(rd_num, i, true); \ + float64_t vs2 = P.VU.elt(rs2_num, i); \ + float64_t vs1 = f32_to_f64(P.VU.elt(rs1_num, i)); \ + BODY32; \ + set_fp_exceptions; \ + break; \ + } \ + default: \ + require(0); \ + }; \ + DEBUG_RVV_FP_VV; \ + VI_VFP_LOOP_END + +#define VI_VFP_LOOP_SCALE_BASE \ + require_fp; \ + require_vector(true); \ + require(STATE.frm->read() < 0x5); \ + reg_t vl = P.VU.vl->read(); \ + reg_t rd_num = insn.rd(); \ + reg_t rs1_num = insn.rs1(); \ + reg_t rs2_num = insn.rs2(); \ + softfloat_roundingMode = STATE.frm->read(); \ + for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { \ + VI_LOOP_ELEMENT_SKIP(); + +#define VI_VFP_CVT_LOOP(CVT_PARAMS, CHECK, BODY) \ + CHECK \ + VI_VFP_LOOP_SCALE_BASE \ + CVT_PARAMS \ + BODY \ + set_fp_exceptions; \ + VI_VFP_LOOP_END + +#define VI_VFP_CVT_INT_TO_FP(BODY16, BODY32, BODY64, sign) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 16, sign), \ + { p->extension_enabled(EXT_ZFH); }, \ + BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 32, sign), \ + { p->extension_enabled('F'); }, \ + BODY32); } \ + break; \ + case e64: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 64, sign), \ + { p->extension_enabled('D'); }, \ + BODY64); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_CVT_FP_TO_INT(BODY16, BODY32, BODY64, sign) \ + VI_CHECK_SSS(false); \ + VI_VFP_COMMON \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 16, sign), \ + { p->extension_enabled(EXT_ZFH); }, \ + BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 32, sign), \ + { p->extension_enabled('F'); }, \ + BODY32); } \ + break; \ + case e64: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 64, sign), \ + { p->extension_enabled('D'); }, \ + BODY64); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(16, 32), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 64), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_INT_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e8: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(8, 16, sign), CHECK8, BODY8); } \ + break; \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(16, 32, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 64, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_WCVT_FP_TO_INT(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32, \ + sign) \ + VI_CHECK_DSS(false); \ + switch (P.VU.vsew) { \ + case e16: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 32, sign), CHECK16, BODY16); } \ + break; \ + case e32: \ + { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 64, sign), CHECK32, BODY32); } \ + break; \ + default: \ + require(0); \ + break; \ + } + +#define VI_VFP_NCVT_FP_TO_FP(BODY8, BODY16, BODY32, \ + CHECK8, CHECK16, CHECK32) \ + VI_CHECK_SDS(false); \ + 
switch (P.VU.vsew) { \
+    case e16: \
+      { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(32, 16), CHECK16, BODY16); } \
+      break; \
+    case e32: \
+      { VI_VFP_CVT_LOOP(CVT_FP_TO_FP_PARAMS(64, 32), CHECK32, BODY32); } \
+      break; \
+    default: \
+      require(0); \
+      break; \
+  }
+
+#define VI_VFP_NCVT_INT_TO_FP(BODY8, BODY16, BODY32, \
+                              CHECK8, CHECK16, CHECK32, \
+                              sign) \
+  VI_CHECK_SDS(false); \
+  switch (P.VU.vsew) { \
+    case e16: \
+      { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(32, 16, sign), CHECK16, BODY16); } \
+      break; \
+    case e32: \
+      { VI_VFP_CVT_LOOP(CVT_INT_TO_FP_PARAMS(64, 32, sign), CHECK32, BODY32); } \
+      break; \
+    default: \
+      require(0); \
+      break; \
+  }
+
+#define VI_VFP_NCVT_FP_TO_INT(BODY8, BODY16, BODY32, \
+                              CHECK8, CHECK16, CHECK32, \
+                              sign) \
+  VI_CHECK_SDS(false); \
+  switch (P.VU.vsew) { \
+    case e8: \
+      { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(16, 8, sign), CHECK8, BODY8); } \
+      break; \
+    case e16: \
+      { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(32, 16, sign), CHECK16, BODY16); } \
+      break; \
+    case e32: \
+      { VI_VFP_CVT_LOOP(CVT_FP_TO_INT_PARAMS(64, 32, sign), CHECK32, BODY32); } \
+      break; \
+    default: \
+      require(0); \
+      break; \
+  }
+
+// The p-extension support is contributed by
+// Programming Language Lab, Department of Computer Science, National Tsing-Hua University, Taiwan
+
+#define P_FIELD(R, INDEX, SIZE) \
+  (type_sew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE))
+
+#define P_UFIELD(R, INDEX, SIZE) \
+  (type_usew_t<SIZE>::type)get_field(R, make_mask64(((INDEX) * SIZE), SIZE))
+
+#define P_B(R, INDEX) P_UFIELD(R, INDEX, 8)
+#define P_H(R, INDEX) P_UFIELD(R, INDEX, 16)
+#define P_W(R, INDEX) P_UFIELD(R, INDEX, 32)
+#define P_SB(R, INDEX) P_FIELD(R, INDEX, 8)
+#define P_SH(R, INDEX) P_FIELD(R, INDEX, 16)
+#define P_SW(R, INDEX) P_FIELD(R, INDEX, 32)
+
+#define READ_REG_PAIR(reg) ({ \
+  require((reg) % 2 == 0); \
+  (reg) == 0 ?
reg_t(0) : \ + (READ_REG((reg) + 1) << 32) + zext32(READ_REG(reg)); }) + +#define RS1_PAIR READ_REG_PAIR(insn.rs1()) +#define RS2_PAIR READ_REG_PAIR(insn.rs2()) +#define RD_PAIR READ_REG_PAIR(insn.rd()) + +#define WRITE_PD() \ + rd_tmp = set_field(rd_tmp, make_mask64((i * sizeof(pd) * 8), sizeof(pd) * 8), pd); + +#define WRITE_RD_PAIR(value) \ + if (insn.rd() != 0) { \ + require(insn.rd() % 2 == 0); \ + WRITE_REG(insn.rd(), sext32(value)); \ + WRITE_REG(insn.rd() + 1, (sreg_t(value)) >> 32); \ + } + +#define P_SET_OV(ov) \ + if (ov) P.VU.vxsat->write(1); + +#define P_SAT(R, BIT) \ + if (R > INT##BIT##_MAX) { \ + R = INT##BIT##_MAX; \ + P_SET_OV(1); \ + } else if (R < INT##BIT##_MIN) { \ + R = INT##BIT##_MIN; \ + P_SET_OV(1); \ + } + +#define P_SATU(R, BIT) \ + if (R > UINT##BIT##_MAX) { \ + R = UINT##BIT##_MAX; \ + P_SET_OV(1); \ + } else if (R < 0) { \ + P_SET_OV(1); \ + R = 0; \ + } + +#define P_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + reg_t rs2 = RS2; \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_ONE_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_I_LOOP_BASE(BIT, IMMBIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + type_usew_t::type imm##IMMBIT##u = insn.p_imm##IMMBIT(); \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_X_LOOP_BASE(BIT, LOWBIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + type_usew_t::type sa = RS2 & ((uint64_t(1) << LOWBIT) - 1); \ + type_sew_t::type ssa = int64_t(RS2) << (64 - LOWBIT) >> (64 - LOWBIT); \ + sreg_t len = xlen / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_MUL_LOOP_BASE(BIT) \ + require_extension(EXT_ZPN); \ + require(BIT == e8 || BIT == e16 || BIT == e32); \ + reg_t rd_tmp = RD; \ + reg_t rs1 = RS1; \ + reg_t rs2 = RS2; \ + sreg_t len = 32 / BIT; \ + for (sreg_t i = len - 1; i >= 0; --i) { + +#define P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32 || BIT == e64); \ + reg_t rd_tmp = USE_RD ? zext_xlen(RD) : 0; \ + reg_t rs1 = zext_xlen(RS1); \ + reg_t rs2 = zext_xlen(RS2); \ + sreg_t len = 64 / BIT; \ + sreg_t len_inner = BIT / BIT_INNER; \ + for (sreg_t i = len - 1; i >= 0; --i) { \ + sreg_t pd_res = P_FIELD(rd_tmp, i, BIT); \ + for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { + +#define P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32 || BIT == e64); \ + reg_t rd_tmp = USE_RD ? 
zext_xlen(RD) : 0; \ + reg_t rs1 = zext_xlen(RS1); \ + reg_t rs2 = zext_xlen(RS2); \ + sreg_t len = 64 / BIT; \ + sreg_t len_inner = BIT / BIT_INNER; \ + for (sreg_t i = len - 1; i >=0; --i) { \ + reg_t pd_res = P_UFIELD(rd_tmp, i, BIT); \ + for (sreg_t j = i * len_inner; j < (i + 1) * len_inner; ++j) { + +#define P_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, i, BIT); + +#define P_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, i, BIT); + +#define P_CORSS_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + +#define P_CORSS_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); + +#define P_ONE_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); + +#define P_ONE_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_UFIELD(rs1, i, BIT); + +#define P_ONE_SUPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT); \ + auto ps1 = P_FIELD(rs1, i, BIT); + +#define P_MUL_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, i, BIT); + +#define P_MUL_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, i, BIT); + +#define P_MUL_CROSS_PARAMS(BIT) \ + auto pd = P_FIELD(rd_tmp, i, BIT * 2); \ + auto ps1 = P_FIELD(rs1, i, BIT); \ + auto ps2 = P_FIELD(rs2, (i ^ 1), BIT); + +#define P_MUL_CROSS_UPARAMS(BIT) \ + auto pd = P_UFIELD(rd_tmp, i, BIT*2); \ + auto ps1 = P_UFIELD(rs1, i, BIT); \ + auto ps2 = P_UFIELD(rs2, (i ^ 1), BIT); + +#define P_REDUCTION_PARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_FIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_UPARAMS(BIT_INNER) \ + auto ps1 = P_UFIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_UFIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_SUPARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_UFIELD(rs2, j, BIT_INNER); + +#define P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ + auto ps1 = P_FIELD(rs1, j, BIT_INNER); \ + auto ps2 = P_FIELD(rs2, (j ^ 1), BIT_INNER); + +#define P_LOOP_BODY(BIT, BODY) { \ + P_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ULOOP_BODY(BIT, BODY) { \ + P_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ONE_LOOP_BODY(BIT, BODY) { \ + P_ONE_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_CROSS_LOOP_BODY(BIT, BODY) { \ + P_CORSS_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_CROSS_ULOOP_BODY(BIT, BODY) { \ + P_CORSS_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_ONE_ULOOP_BODY(BIT, BODY) { \ + P_ONE_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_LOOP_BODY(BIT, BODY) { \ + P_MUL_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_ULOOP_BODY(BIT, BODY) { \ + P_MUL_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_CROSS_LOOP_BODY(BIT, BODY) { \ + P_MUL_CROSS_PARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_MUL_CROSS_ULOOP_BODY(BIT, BODY) { \ + P_MUL_CROSS_UPARAMS(BIT) \ + BODY \ + WRITE_PD(); \ +} + +#define P_LOOP(BIT, BODY) \ + P_LOOP_BASE(BIT) \ + P_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_ONE_LOOP(BIT, BODY) \ + P_ONE_LOOP_BASE(BIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_ULOOP(BIT, BODY) \ + P_LOOP_BASE(BIT) \ + 
P_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_CROSS_LOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_CROSS_LOOP_BODY(BIT, BODY1) \ + --i; \ + if (sizeof(#BODY2) == 1) { \ + P_CROSS_LOOP_BODY(BIT, BODY1) \ + } \ + else { \ + P_CROSS_LOOP_BODY(BIT, BODY2) \ + } \ + P_LOOP_END() + +#define P_CROSS_ULOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_CROSS_ULOOP_BODY(BIT, BODY1) \ + --i; \ + P_CROSS_ULOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_STRAIGHT_LOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_LOOP_BODY(BIT, BODY1) \ + --i; \ + P_LOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_STRAIGHT_ULOOP(BIT, BODY1, BODY2) \ + P_LOOP_BASE(BIT) \ + P_ULOOP_BODY(BIT, BODY1) \ + --i; \ + P_ULOOP_BODY(BIT, BODY2) \ + P_LOOP_END() + +#define P_X_LOOP(BIT, RS2_LOW_BIT, BODY) \ + P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_X_ULOOP(BIT, RS2_LOW_BIT, BODY) \ + P_X_LOOP_BASE(BIT, RS2_LOW_BIT) \ + P_ONE_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_I_LOOP(BIT, IMMBIT, BODY) \ + P_I_LOOP_BASE(BIT, IMMBIT) \ + P_ONE_LOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_I_ULOOP(BIT, IMMBIT, BODY) \ + P_I_LOOP_BASE(BIT, IMMBIT) \ + P_ONE_ULOOP_BODY(BIT, BODY) \ + P_LOOP_END() + +#define P_MUL_LOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_LOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_ULOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_ULOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_CROSS_LOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_CROSS_LOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_MUL_CROSS_ULOOP(BIT, BODY) \ + P_MUL_LOOP_BASE(BIT) \ + P_MUL_CROSS_ULOOP_BODY(BIT, BODY) \ + P_PAIR_LOOP_END() + +#define P_REDUCTION_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_PARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_ULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_ULOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_UPARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_ULOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_SULOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_SUPARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_REDUCTION_CROSS_LOOP(BIT, BIT_INNER, USE_RD, IS_SAT, BODY) \ + P_REDUCTION_LOOP_BASE(BIT, BIT_INNER, USE_RD) \ + P_REDUCTION_CROSS_PARAMS(BIT_INNER) \ + BODY \ + P_REDUCTION_LOOP_END(BIT, IS_SAT) + +#define P_LOOP_END() \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_PAIR_LOOP_END() \ + } \ + if (xlen == 32) { \ + WRITE_RD_PAIR(rd_tmp); \ + } \ + else { \ + WRITE_RD(sext_xlen(rd_tmp)); \ + } + +#define P_REDUCTION_LOOP_END(BIT, IS_SAT) \ + } \ + if (IS_SAT) { \ + P_SAT(pd_res, BIT); \ + } \ + type_usew_t::type pd = pd_res; \ + WRITE_PD(); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_REDUCTION_ULOOP_END(BIT, IS_SAT) \ + } \ + if (IS_SAT) { \ + P_SATU(pd_res, BIT); \ + } \ + type_usew_t::type pd = pd_res; \ + WRITE_PD(); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_SUNPKD8(X, Y) \ + require_extension(EXT_ZPN); \ + reg_t rd_tmp = 0; \ + int16_t pd[4] = { \ + P_SB(RS1, Y), \ + P_SB(RS1, X), \ + P_SB(RS1, Y + 4), \ + P_SB(RS1, X + 4), \ + }; \ + if (xlen == 64) { \ + memcpy(&rd_tmp, pd, 8); \ + } else { \ + memcpy(&rd_tmp, pd, 4); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_ZUNPKD8(X, Y) \ + require_extension(EXT_ZPN); \ + reg_t rd_tmp = 0; \ + uint16_t pd[4] = { \ + P_B(RS1, Y), \ + 
P_B(RS1, X), \ + P_B(RS1, Y + 4), \ + P_B(RS1, X + 4), \ + }; \ + if (xlen == 64) { \ + memcpy(&rd_tmp, pd, 8); \ + } else { \ + memcpy(&rd_tmp, pd, 4); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_PK(BIT, X, Y) \ + require_extension(EXT_ZPN); \ + require(BIT == e16 || BIT == e32); \ + reg_t rd_tmp = 0, rs1 = RS1, rs2 = RS2; \ + for (sreg_t i = 0; i < xlen / BIT / 2; i++) { \ + rd_tmp = set_field(rd_tmp, make_mask64(i * 2 * BIT, BIT), \ + P_UFIELD(RS2, i * 2 + Y, BIT)); \ + rd_tmp = set_field(rd_tmp, make_mask64((i * 2 + 1) * BIT, BIT), \ + P_UFIELD(RS1, i * 2 + X, BIT)); \ + } \ + WRITE_RD(sext_xlen(rd_tmp)); + +#define P_64_PROFILE_BASE() \ + require_extension(EXT_ZPSFOPERAND); \ + sreg_t rd, rs1, rs2; + +#define P_64_UPROFILE_BASE() \ + require_extension(EXT_ZPSFOPERAND); \ + reg_t rd, rs1, rs2; + +#define P_64_PROFILE_PARAM(USE_RD, INPUT_PAIR) \ + if (xlen == 32) { \ + rs1 = INPUT_PAIR ? RS1_PAIR : RS1; \ + rs2 = INPUT_PAIR ? RS2_PAIR : RS2; \ + rd = USE_RD ? RD_PAIR : 0; \ + } else { \ + rs1 = RS1; \ + rs2 = RS2; \ + rd = USE_RD ? RD : 0; \ + } + +#define P_64_PROFILE(BODY) \ + P_64_PROFILE_BASE() \ + P_64_PROFILE_PARAM(false, true) \ + BODY \ + P_64_PROFILE_END() \ + +#define P_64_UPROFILE(BODY) \ + P_64_UPROFILE_BASE() \ + P_64_PROFILE_PARAM(false, true) \ + BODY \ + P_64_PROFILE_END() \ + +#define P_64_PROFILE_REDUCTION(BIT, BODY) \ + P_64_PROFILE_BASE() \ + P_64_PROFILE_PARAM(true, false) \ + for (sreg_t i = 0; i < xlen / BIT; i++) { \ + sreg_t ps1 = P_FIELD(rs1, i, BIT); \ + sreg_t ps2 = P_FIELD(rs2, i, BIT); \ + BODY \ + } \ + P_64_PROFILE_END() \ + +#define P_64_UPROFILE_REDUCTION(BIT, BODY) \ + P_64_UPROFILE_BASE() \ + P_64_PROFILE_PARAM(true, false) \ + for (sreg_t i = 0; i < xlen / BIT; i++) { \ + reg_t ps1 = P_UFIELD(rs1, i, BIT); \ + reg_t ps2 = P_UFIELD(rs2, i, BIT); \ + BODY \ + } \ + P_64_PROFILE_END() \ + +#define P_64_PROFILE_END() \ + if (xlen == 32) { \ + WRITE_RD_PAIR(rd); \ + } else { \ + WRITE_RD(sext_xlen(rd)); \ + } + +#define DECLARE_XENVCFG_VARS(field) \ + reg_t m##field = get_field(STATE.menvcfg->read(), MENVCFG_##field); \ + reg_t s##field = get_field(STATE.senvcfg->read(), SENVCFG_##field); \ + reg_t h##field = get_field(STATE.henvcfg->read(), HENVCFG_##field) + +#define DEBUG_START 0x0 +#define DEBUG_END (0x1000 - 1) + +#endif diff --git a/vendor/riscv-isa-sim/riscv/devices.cc b/vendor/riscv-isa-sim/riscv/devices.cc new file mode 100644 index 00000000..eb677a58 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/devices.cc @@ -0,0 +1,139 @@ +#include "devices.h" +#include "mmu.h" +#include + +void bus_t::add_device(reg_t addr, abstract_device_t* dev) +{ + // Searching devices via lower_bound/upper_bound + // implicitly relies on the underlying std::map + // container to sort the keys and provide ordered + // iteration over this sort, which it does. (python's + // SortedDict is a good analogy) + devices[addr] = dev; +} + +bool bus_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + // Find the device with the base address closest to but + // less than addr (price-is-right search) + auto it = devices.upper_bound(addr); + if (devices.empty() || it == devices.begin()) { + // Either the bus is empty, or there weren't + // any items with a base address <= addr + return false; + } + // Found at least one item with base address <= addr + // The iterator points to the device after this, so + // go back by one item. 
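+  //
+  // A worked example of this search (hypothetical device layout, not taken
+  // from this file): with devices registered at 0x02000000 (a CLINT) and
+  // 0x80000000 (main memory), a load at 0x80000010 makes upper_bound()
+  // return end(); stepping back one entry selects the 0x80000000 device,
+  // which is then asked for the access at offset 0x10.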
+ it--; + return it->second->load(addr - it->first, len, bytes); +} + +bool bus_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + // See comments in bus_t::load + auto it = devices.upper_bound(addr); + if (devices.empty() || it == devices.begin()) { + return false; + } + it--; + return it->second->store(addr - it->first, len, bytes); +} + +std::pair bus_t::find_device(reg_t addr) +{ + // See comments in bus_t::load + auto it = devices.upper_bound(addr); + if (devices.empty() || it == devices.begin()) { + return std::make_pair((reg_t)0, (abstract_device_t*)NULL); + } + it--; + return std::make_pair(it->first, it->second); +} + +// Type for holding all registered MMIO plugins by name. +using mmio_plugin_map_t = std::map; + +// Simple singleton instance of an mmio_plugin_map_t. +static mmio_plugin_map_t& mmio_plugin_map() +{ + static mmio_plugin_map_t instance; + return instance; +} + +void register_mmio_plugin(const char* name_cstr, + const mmio_plugin_t* mmio_plugin) +{ + std::string name(name_cstr); + if (!mmio_plugin_map().emplace(name, *mmio_plugin).second) { + throw std::runtime_error("Plugin \"" + name + "\" already registered!"); + } +} + +mmio_plugin_device_t::mmio_plugin_device_t(const std::string& name, + const std::string& args) + : plugin(mmio_plugin_map().at(name)), user_data((*plugin.alloc)(args.c_str())) +{ +} + +mmio_plugin_device_t::~mmio_plugin_device_t() +{ + (*plugin.dealloc)(user_data); +} + +bool mmio_plugin_device_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + return (*plugin.load)(user_data, addr, len, bytes); +} + +bool mmio_plugin_device_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + return (*plugin.store)(user_data, addr, len, bytes); +} + +mem_t::mem_t(reg_t size) + : sz(size) +{ + if (size == 0 || size % PGSIZE != 0) + throw std::runtime_error("memory size must be a positive multiple of 4 KiB"); +} + +mem_t::~mem_t() +{ + for (auto& entry : sparse_memory_map) + free(entry.second); +} + +bool mem_t::load_store(reg_t addr, size_t len, uint8_t* bytes, bool store) +{ + if (addr + len < addr || addr + len > sz) + return false; + + while (len > 0) { + auto n = std::min(PGSIZE - (addr % PGSIZE), reg_t(len)); + + if (store) + memcpy(this->contents(addr), bytes, n); + else + memcpy(bytes, this->contents(addr), n); + + addr += n; + bytes += n; + len -= n; + } + + return true; +} + +char* mem_t::contents(reg_t addr) { + reg_t ppn = addr >> PGSHIFT, pgoff = addr % PGSIZE; + auto search = sparse_memory_map.find(ppn); + if (search == sparse_memory_map.end()) { + auto res = (char*)calloc(PGSIZE, 1); + if (res == nullptr) + throw std::bad_alloc(); + sparse_memory_map[ppn] = res; + return res + pgoff; + } + return search->second + pgoff; +} diff --git a/vendor/riscv-isa-sim/riscv/devices.h b/vendor/riscv-isa-sim/riscv/devices.h new file mode 100644 index 00000000..9200f29b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/devices.h @@ -0,0 +1,87 @@ +#ifndef _RISCV_DEVICES_H +#define _RISCV_DEVICES_H + +#include "decode.h" +#include "mmio_plugin.h" +#include "abstract_device.h" +#include "platform.h" +#include +#include +#include + +class processor_t; + +class bus_t : public abstract_device_t { + public: + bool load(reg_t addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + void add_device(reg_t addr, abstract_device_t* dev); + + std::pair find_device(reg_t addr); + + private: + std::map devices; +}; + +class rom_device_t : public abstract_device_t { + public: + rom_device_t(std::vector data); + bool load(reg_t 
addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + const std::vector& contents() { return data; } + private: + std::vector data; +}; + +class mem_t : public abstract_device_t { + public: + mem_t(reg_t size); + mem_t(const mem_t& that) = delete; + ~mem_t(); + + bool load(reg_t addr, size_t len, uint8_t* bytes) { return load_store(addr, len, bytes, false); } + bool store(reg_t addr, size_t len, const uint8_t* bytes) { return load_store(addr, len, const_cast(bytes), true); } + char* contents(reg_t addr); + reg_t size() { return sz; } + + private: + bool load_store(reg_t addr, size_t len, uint8_t* bytes, bool store); + + std::map sparse_memory_map; + reg_t sz; +}; + +class clint_t : public abstract_device_t { + public: + clint_t(std::vector&, uint64_t freq_hz, bool real_time); + bool load(reg_t addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + size_t size() { return CLINT_SIZE; } + void increment(reg_t inc); + private: + typedef uint64_t mtime_t; + typedef uint64_t mtimecmp_t; + typedef uint32_t msip_t; + std::vector& procs; + uint64_t freq_hz; + bool real_time; + uint64_t real_time_ref_secs; + uint64_t real_time_ref_usecs; + mtime_t mtime; + std::vector mtimecmp; +}; + +class mmio_plugin_device_t : public abstract_device_t { + public: + mmio_plugin_device_t(const std::string& name, const std::string& args); + virtual ~mmio_plugin_device_t() override; + + virtual bool load(reg_t addr, size_t len, uint8_t* bytes) override; + virtual bool store(reg_t addr, size_t len, const uint8_t* bytes) override; + + private: + mmio_plugin_t plugin; + void* user_data; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/disasm.h b/vendor/riscv-isa-sim/riscv/disasm.h new file mode 100644 index 00000000..338cac24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/disasm.h @@ -0,0 +1,109 @@ +// See LICENSE for license details. 
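+//
+// Note on instruction lookup: this file follows the match/mask convention
+// used by the generated encoding.h tables; an instruction word `insn` is a
+// given opcode iff (insn.bits() & mask) == match (see
+// disasm_insn_t::operator== below). As a worked example, ADDI has mask
+// 0x707f, which keeps opcode[6:0] and funct3[14:12], and match 0x13,
+// i.e. opcode 0010011 with funct3 000.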
+ +#ifndef _RISCV_DISASM_H +#define _RISCV_DISASM_H + +#include "decode.h" +#include "isa_parser.h" +#include +#include +#include +#include + +extern const char* xpr_name[NXPR]; +extern const char* fpr_name[NFPR]; +extern const char* vr_name[NVPR]; +extern const char* csr_name(int which); + +class arg_t +{ + public: + virtual std::string to_string(insn_t val) const = 0; + virtual ~arg_t() {} +}; + +class disasm_insn_t +{ + public: + NOINLINE disasm_insn_t(const char* name_, uint32_t match, uint32_t mask, + const std::vector& args) + : match(match), mask(mask), args(args) + { + name = name_; + std::replace(name.begin(), name.end(), '_', '.'); + } + + bool operator == (insn_t insn) const + { + return (insn.bits() & mask) == match; + } + + const char* get_name() const + { + return name.c_str(); + } + + std::string to_string(insn_t insn) const + { + std::string s(name); + + if (args.size()) + { + bool next_arg_optional = false; + s += std::string(std::max(1, 8 - int(name.size())), ' '); + for (size_t i = 0; i < args.size(); i++) { + if (args[i] == nullptr) { + next_arg_optional = true; + continue; + } + std::string argString = args[i]->to_string(insn); + if (next_arg_optional) { + next_arg_optional = false; + if (argString.empty()) continue; + } + if (i != 0) s += ", "; + s += argString; + } + } + return s; + } + + uint32_t get_match() const { return match; } + uint32_t get_mask() const { return mask; } + + private: + uint32_t match; + uint32_t mask; + std::vector args; + std::string name; +}; + +class disassembler_t +{ + public: + disassembler_t(const isa_parser_t *isa); + ~disassembler_t(); + + std::string disassemble(insn_t insn) const; + const disasm_insn_t* lookup(insn_t insn) const; + + void add_insn(disasm_insn_t* insn); + + private: + static const int HASH_SIZE = 255; + std::vector chain[HASH_SIZE+1]; + + void add_instructions(const isa_parser_t* isa); + + const disasm_insn_t* probe_once(insn_t insn, size_t idx) const; + + static const unsigned int MASK1 = 0x7f; + static const unsigned int MASK2 = 0xe003; + + static unsigned int hash(insn_bits_t insn, unsigned int mask) + { + return (insn & mask) % HASH_SIZE; + } +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/dts.cc b/vendor/riscv-isa-sim/riscv/dts.cc new file mode 100644 index 00000000..6b47c764 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/dts.cc @@ -0,0 +1,327 @@ +// See LICENSE for license details. 
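+//
+// Overview (summary of the code below): make_dts() emits a device-tree
+// source string describing the simulated platform (cpus, memory, clint and
+// htif nodes); dts_compile() forks the external `dtc` tool, pipes the DTS
+// into it and reads back the compiled DTB blob. The fdt_* helpers parse
+// properties such as "reg", "riscv,pmpregions" and "mmu-type" back out of a
+// flattened device tree.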
+ +#include "dts.h" +#include "libfdt.h" +#include "platform.h" +#include +#include +#include +#include +#include +#include +#include + +std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz, + reg_t initrd_start, reg_t initrd_end, + const char* bootargs, + std::vector procs, + std::vector> mems) +{ + std::stringstream s; + s << std::dec << + "/dts-v1/;\n" + "\n" + "/ {\n" + " #address-cells = <2>;\n" + " #size-cells = <2>;\n" + " compatible = \"ucbbar,spike-bare-dev\";\n" + " model = \"ucbbar,spike-bare\";\n" + " chosen {\n"; + if (initrd_start < initrd_end) { + s << " linux,initrd-start = <" << (size_t)initrd_start << ">;\n" + " linux,initrd-end = <" << (size_t)initrd_end << ">;\n"; + if (!bootargs) + bootargs = "root=/dev/ram console=hvc0 earlycon=sbi"; + } else { + if (!bootargs) + bootargs = "console=hvc0 earlycon=sbi"; + } + s << " bootargs = \""; + for (size_t i = 0; i < strlen(bootargs); i++) { + if (bootargs[i] == '"') + s << '\\' << bootargs[i]; + else + s << bootargs[i]; + } + s << "\";\n"; + s << " };\n" + " cpus {\n" + " #address-cells = <1>;\n" + " #size-cells = <0>;\n" + " timebase-frequency = <" << (cpu_hz/insns_per_rtc_tick) << ">;\n"; + for (size_t i = 0; i < procs.size(); i++) { + s << " CPU" << i << ": cpu@" << i << " {\n" + " device_type = \"cpu\";\n" + " reg = <" << i << ">;\n" + " status = \"okay\";\n" + " compatible = \"riscv\";\n" + " riscv,isa = \"" << procs[i]->get_isa().get_isa_string() << "\";\n" + " mmu-type = \"riscv," << (procs[i]->get_isa().get_max_xlen() <= 32 ? "sv32" : "sv57") << "\";\n" + " riscv,pmpregions = <16>;\n" + " riscv,pmpgranularity = <4>;\n" + " clock-frequency = <" << cpu_hz << ">;\n" + " CPU" << i << "_intc: interrupt-controller {\n" + " #address-cells = <2>;\n" + " #interrupt-cells = <1>;\n" + " interrupt-controller;\n" + " compatible = \"riscv,cpu-intc\";\n" + " };\n" + " };\n"; + } + s << " };\n"; + for (auto& m : mems) { + s << std::hex << + " memory@" << m.first << " {\n" + " device_type = \"memory\";\n" + " reg = <0x" << (m.first >> 32) << " 0x" << (m.first & (uint32_t)-1) << + " 0x" << (m.second->size() >> 16 >> 16) << " 0x" << (m.second->size() & (uint32_t)-1) << ">;\n" + " };\n"; + } + s << " soc {\n" + " #address-cells = <2>;\n" + " #size-cells = <2>;\n" + " compatible = \"ucbbar,spike-bare-soc\", \"simple-bus\";\n" + " ranges;\n" + " clint@" << CLINT_BASE << " {\n" + " compatible = \"riscv,clint0\";\n" + " interrupts-extended = <" << std::dec; + for (size_t i = 0; i < procs.size(); i++) + s << "&CPU" << i << "_intc 3 &CPU" << i << "_intc 7 "; + reg_t clintbs = CLINT_BASE; + reg_t clintsz = CLINT_SIZE; + s << std::hex << ">;\n" + " reg = <0x" << (clintbs >> 32) << " 0x" << (clintbs & (uint32_t)-1) << + " 0x" << (clintsz >> 32) << " 0x" << (clintsz & (uint32_t)-1) << ">;\n" + " };\n" + " };\n" + " htif {\n" + " compatible = \"ucb,htif0\";\n" + " };\n" + "};\n"; + return s.str(); +} + +std::string dts_compile(const std::string& dts) +{ + // Convert the DTS to DTB + int dts_pipe[2]; + pid_t dts_pid; + + fflush(NULL); // flush stdout/stderr before forking + if (pipe(dts_pipe) != 0 || (dts_pid = fork()) < 0) { + std::cerr << "Failed to fork dts child: " << strerror(errno) << std::endl; + exit(1); + } + + // Child process to output dts + if (dts_pid == 0) { + close(dts_pipe[0]); + int step, len = dts.length(); + const char *buf = dts.c_str(); + for (int done = 0; done < len; done += step) { + step = write(dts_pipe[1], buf+done, len-done); + if (step == -1) { + std::cerr << "Failed to write dts: " << strerror(errno) << std::endl; 
+ exit(1); + } + } + close(dts_pipe[1]); + exit(0); + } + + pid_t dtb_pid; + int dtb_pipe[2]; + if (pipe(dtb_pipe) != 0 || (dtb_pid = fork()) < 0) { + std::cerr << "Failed to fork dtb child: " << strerror(errno) << std::endl; + exit(1); + } + + // Child process to output dtb + if (dtb_pid == 0) { + dup2(dts_pipe[0], 0); + dup2(dtb_pipe[1], 1); + close(dts_pipe[0]); + close(dts_pipe[1]); + close(dtb_pipe[0]); + close(dtb_pipe[1]); + execlp(DTC, DTC, "-O", "dtb", 0); + std::cerr << "Failed to run " DTC ": " << strerror(errno) << std::endl; + exit(1); + } + + close(dts_pipe[1]); + close(dts_pipe[0]); + close(dtb_pipe[1]); + + // Read-out dtb + std::stringstream dtb; + + int got; + char buf[4096]; + while ((got = read(dtb_pipe[0], buf, sizeof(buf))) > 0) { + dtb.write(buf, got); + } + if (got == -1) { + std::cerr << "Failed to read dtb: " << strerror(errno) << std::endl; + exit(1); + } + close(dtb_pipe[0]); + + // Reap children + int status; + waitpid(dts_pid, &status, 0); + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + std::cerr << "Child dts process failed" << std::endl; + exit(1); + } + waitpid(dtb_pid, &status, 0); + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + std::cerr << "Child dtb process failed" << std::endl; + exit(1); + } + + return dtb.str(); +} + + +static int fdt_get_node_addr_size(void *fdt, int node, reg_t *addr, + unsigned long *size, const char *field) +{ + int parent, len, i; + int cell_addr, cell_size; + const fdt32_t *prop_addr, *prop_size; + uint64_t temp = 0; + + parent = fdt_parent_offset(fdt, node); + if (parent < 0) + return parent; + + cell_addr = fdt_address_cells(fdt, parent); + if (cell_addr < 1) + return -ENODEV; + + cell_size = fdt_size_cells(fdt, parent); + if (cell_size < 0) + return -ENODEV; + + if (!field) + return -ENODEV; + + prop_addr = (fdt32_t *)fdt_getprop(fdt, node, field, &len); + if (!prop_addr) + return -ENODEV; + prop_size = prop_addr + cell_addr; + + if (addr) { + for (i = 0; i < cell_addr; i++) + temp = (temp << 32) | fdt32_to_cpu(*prop_addr++); + *addr = temp; + } + temp = 0; + + if (size) { + for (i = 0; i < cell_size; i++) + temp = (temp << 32) | fdt32_to_cpu(*prop_size++); + *size = temp; + } + + return 0; +} + +static int check_cpu_node(void *fdt, int cpu_offset) +{ + int len; + const void *prop; + + if (!fdt || cpu_offset < 0) + return -EINVAL; + + prop = fdt_getprop(fdt, cpu_offset, "device_type", &len); + if (!prop || !len) + return -EINVAL; + if (strncmp ((char *)prop, "cpu", strlen ("cpu"))) + return -EINVAL; + + return 0; +} + + +int fdt_get_offset(void *fdt, const char *field) +{ + return fdt_path_offset(fdt, field); +} + +int fdt_get_first_subnode(void *fdt, int node) +{ + return fdt_first_subnode(fdt, node); +} + +int fdt_get_next_subnode(void *fdt, int node) +{ + return fdt_next_subnode(fdt, node); +} + +int fdt_parse_clint(void *fdt, reg_t *clint_addr, + const char *compatible) +{ + int nodeoffset, rc; + + nodeoffset = fdt_node_offset_by_compatible(fdt, -1, compatible); + if (nodeoffset < 0) + return nodeoffset; + + rc = fdt_get_node_addr_size(fdt, nodeoffset, clint_addr, NULL, "reg"); + if (rc < 0 || !clint_addr) + return -ENODEV; + + return 0; +} + +int fdt_parse_pmp_num(void *fdt, int cpu_offset, reg_t *pmp_num) +{ + int rc; + + if ((rc = check_cpu_node(fdt, cpu_offset)) < 0) + return rc; + + rc = fdt_get_node_addr_size(fdt, cpu_offset, pmp_num, NULL, + "riscv,pmpregions"); + if (rc < 0 || !pmp_num) + return -ENODEV; + + return 0; +} + +int fdt_parse_pmp_alignment(void *fdt, int cpu_offset, reg_t 
*pmp_align) +{ + int rc; + + if ((rc = check_cpu_node(fdt, cpu_offset)) < 0) + return rc; + + rc = fdt_get_node_addr_size(fdt, cpu_offset, pmp_align, NULL, + "riscv,pmpgranularity"); + if (rc < 0 || !pmp_align) + return -ENODEV; + + return 0; +} + +int fdt_parse_mmu_type(void *fdt, int cpu_offset, const char **mmu_type) +{ + assert(mmu_type); + + int len, rc; + const void *prop; + + if ((rc = check_cpu_node(fdt, cpu_offset)) < 0) + return rc; + + prop = fdt_getprop(fdt, cpu_offset, "mmu-type", &len); + if (!prop || !len) + return -EINVAL; + + *mmu_type = (const char *)prop; + + return 0; +} diff --git a/vendor/riscv-isa-sim/riscv/dts.h b/vendor/riscv-isa-sim/riscv/dts.h new file mode 100644 index 00000000..62081511 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/dts.h @@ -0,0 +1,27 @@ +// See LICENSE for license details. +#ifndef _RISCV_DTS_H +#define _RISCV_DTS_H + +#include "devices.h" +#include "processor.h" +#include "mmu.h" +#include + +std::string make_dts(size_t insns_per_rtc_tick, size_t cpu_hz, + reg_t initrd_start, reg_t initrd_end, + const char* bootargs, + std::vector procs, + std::vector> mems); + +std::string dts_compile(const std::string& dts); + +int fdt_get_offset(void *fdt, const char *field); +int fdt_get_first_subnode(void *fdt, int node); +int fdt_get_next_subnode(void *fdt, int node); + +int fdt_parse_clint(void *fdt, reg_t *clint_addr, + const char *compatible); +int fdt_parse_pmp_num(void *fdt, int cpu_offset, reg_t *pmp_num); +int fdt_parse_pmp_alignment(void *fdt, int cpu_offset, reg_t *pmp_align); +int fdt_parse_mmu_type(void *fdt, int cpu_offset, const char **mmu_type); +#endif diff --git a/vendor/riscv-isa-sim/riscv/encoding.h b/vendor/riscv-isa-sim/riscv/encoding.h new file mode 100644 index 00000000..e6dbd7c0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/encoding.h @@ -0,0 +1,4810 @@ +/* + * This file is auto-generated by running 'make' in + * https://github.com/riscv/riscv-opcodes (d2b9aea) + */ + +/* See LICENSE for license details. 
*/ + +#ifndef RISCV_CSR_ENCODING_H +#define RISCV_CSR_ENCODING_H + +#define MSTATUS_UIE 0x00000001 +#define MSTATUS_SIE 0x00000002 +#define MSTATUS_HIE 0x00000004 +#define MSTATUS_MIE 0x00000008 +#define MSTATUS_UPIE 0x00000010 +#define MSTATUS_SPIE 0x00000020 +#define MSTATUS_UBE 0x00000040 +#define MSTATUS_MPIE 0x00000080 +#define MSTATUS_SPP 0x00000100 +#define MSTATUS_VS 0x00000600 +#define MSTATUS_MPP 0x00001800 +#define MSTATUS_FS 0x00006000 +#define MSTATUS_XS 0x00018000 +#define MSTATUS_MPRV 0x00020000 +#define MSTATUS_SUM 0x00040000 +#define MSTATUS_MXR 0x00080000 +#define MSTATUS_TVM 0x00100000 +#define MSTATUS_TW 0x00200000 +#define MSTATUS_TSR 0x00400000 +#define MSTATUS32_SD 0x80000000 +#define MSTATUS_UXL 0x0000000300000000 +#define MSTATUS_SXL 0x0000000C00000000 +#define MSTATUS_SBE 0x0000001000000000 +#define MSTATUS_MBE 0x0000002000000000 +#define MSTATUS_GVA 0x0000004000000000 +#define MSTATUS_MPV 0x0000008000000000 +#define MSTATUS64_SD 0x8000000000000000 + +#define MSTATUSH_SBE 0x00000010 +#define MSTATUSH_MBE 0x00000020 +#define MSTATUSH_GVA 0x00000040 +#define MSTATUSH_MPV 0x00000080 + +#define SSTATUS_UIE 0x00000001 +#define SSTATUS_SIE 0x00000002 +#define SSTATUS_UPIE 0x00000010 +#define SSTATUS_SPIE 0x00000020 +#define SSTATUS_UBE 0x00000040 +#define SSTATUS_SPP 0x00000100 +#define SSTATUS_VS 0x00000600 +#define SSTATUS_FS 0x00006000 +#define SSTATUS_XS 0x00018000 +#define SSTATUS_SUM 0x00040000 +#define SSTATUS_MXR 0x00080000 +#define SSTATUS32_SD 0x80000000 +#define SSTATUS_UXL 0x0000000300000000 +#define SSTATUS64_SD 0x8000000000000000 + +#define HSTATUS_VSXL 0x300000000 +#define HSTATUS_VTSR 0x00400000 +#define HSTATUS_VTW 0x00200000 +#define HSTATUS_VTVM 0x00100000 +#define HSTATUS_VGEIN 0x0003f000 +#define HSTATUS_HU 0x00000200 +#define HSTATUS_SPVP 0x00000100 +#define HSTATUS_SPV 0x00000080 +#define HSTATUS_GVA 0x00000040 +#define HSTATUS_VSBE 0x00000020 + +#define USTATUS_UIE 0x00000001 +#define USTATUS_UPIE 0x00000010 + +#define DCSR_XDEBUGVER (3U<<30) +#define DCSR_NDRESET (1<<29) +#define DCSR_FULLRESET (1<<28) +#define DCSR_EBREAKM (1<<15) +#define DCSR_EBREAKH (1<<14) +#define DCSR_EBREAKS (1<<13) +#define DCSR_EBREAKU (1<<12) +#define DCSR_STOPCYCLE (1<<10) +#define DCSR_STOPTIME (1<<9) +#define DCSR_CAUSE (7<<6) +#define DCSR_DEBUGINT (1<<5) +#define DCSR_HALT (1<<3) +#define DCSR_STEP (1<<2) +#define DCSR_PRV (3<<0) + +#define DCSR_CAUSE_NONE 0 +#define DCSR_CAUSE_SWBP 1 +#define DCSR_CAUSE_HWBP 2 +#define DCSR_CAUSE_DEBUGINT 3 +#define DCSR_CAUSE_STEP 4 +#define DCSR_CAUSE_HALT 5 +#define DCSR_CAUSE_GROUP 6 + +#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4)) +#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5)) +#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11)) + +#define MCONTROL_SELECT (1<<19) +#define MCONTROL_TIMING (1<<18) +#define MCONTROL_ACTION (0x3f<<12) +#define MCONTROL_CHAIN (1<<11) +#define MCONTROL_MATCH (0xf<<7) +#define MCONTROL_M (1<<6) +#define MCONTROL_H (1<<5) +#define MCONTROL_S (1<<4) +#define MCONTROL_U (1<<3) +#define MCONTROL_EXECUTE (1<<2) +#define MCONTROL_STORE (1<<1) +#define MCONTROL_LOAD (1<<0) + +#define MCONTROL_TYPE_NONE 0 +#define MCONTROL_TYPE_MATCH 2 + +#define MCONTROL_ACTION_DEBUG_EXCEPTION 0 +#define MCONTROL_ACTION_DEBUG_MODE 1 +#define MCONTROL_ACTION_TRACE_START 2 +#define MCONTROL_ACTION_TRACE_STOP 3 +#define MCONTROL_ACTION_TRACE_EMIT 4 + +#define MCONTROL_MATCH_EQUAL 0 +#define MCONTROL_MATCH_NAPOT 1 +#define MCONTROL_MATCH_GE 2 +#define MCONTROL_MATCH_LT 3 +#define MCONTROL_MATCH_MASK_LOW 4 +#define 
MCONTROL_MATCH_MASK_HIGH 5 + +#define MIP_USIP (1 << IRQ_U_SOFT) +#define MIP_SSIP (1 << IRQ_S_SOFT) +#define MIP_VSSIP (1 << IRQ_VS_SOFT) +#define MIP_MSIP (1 << IRQ_M_SOFT) +#define MIP_UTIP (1 << IRQ_U_TIMER) +#define MIP_STIP (1 << IRQ_S_TIMER) +#define MIP_VSTIP (1 << IRQ_VS_TIMER) +#define MIP_MTIP (1 << IRQ_M_TIMER) +#define MIP_UEIP (1 << IRQ_U_EXT) +#define MIP_SEIP (1 << IRQ_S_EXT) +#define MIP_VSEIP (1 << IRQ_VS_EXT) +#define MIP_MEIP (1 << IRQ_M_EXT) +#define MIP_SGEIP (1 << IRQ_S_GEXT) + +#define MIP_S_MASK (MIP_SSIP | MIP_STIP | MIP_SEIP) +#define MIP_VS_MASK (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) +#define MIP_HS_MASK (MIP_VS_MASK | MIP_SGEIP) + +#define MIDELEG_FORCED_MASK MIP_HS_MASK + +#define SIP_SSIP MIP_SSIP +#define SIP_STIP MIP_STIP + +#define MENVCFG_FIOM 0x00000001 +#define MENVCFG_CBIE 0x00000030 +#define MENVCFG_CBCFE 0x00000040 +#define MENVCFG_CBZE 0x00000080 +#define MENVCFG_PBMTE 0x4000000000000000 +#define MENVCFG_STCE 0x8000000000000000 + +#define MENVCFGH_PBMTE 0x40000000 +#define MENVCFGH_STCE 0x80000000 + +#define HENVCFG_FIOM 0x00000001 +#define HENVCFG_CBIE 0x00000030 +#define HENVCFG_CBCFE 0x00000040 +#define HENVCFG_CBZE 0x00000080 +#define HENVCFG_PBMTE 0x4000000000000000 +#define HENVCFG_STCE 0x8000000000000000 + +#define HENVCFGH_PBMTE 0x40000000 +#define HENVCFGH_STCE 0x80000000 + +#define SENVCFG_FIOM 0x00000001 +#define SENVCFG_CBIE 0x00000030 +#define SENVCFG_CBCFE 0x00000040 +#define SENVCFG_CBZE 0x00000080 + +#define MSECCFG_MML 0x00000001 +#define MSECCFG_MMWP 0x00000002 +#define MSECCFG_RLB 0x00000004 +#define MSECCFG_USEED 0x00000100 +#define MSECCFG_SSEED 0x00000200 + +#define PRV_U 0 +#define PRV_S 1 +#define PRV_M 3 + +#define PRV_HS (PRV_S + 1) + +#define SATP32_MODE 0x80000000 +#define SATP32_ASID 0x7FC00000 +#define SATP32_PPN 0x003FFFFF +#define SATP64_MODE 0xF000000000000000 +#define SATP64_ASID 0x0FFFF00000000000 +#define SATP64_PPN 0x00000FFFFFFFFFFF + +#define SATP_MODE_OFF 0 +#define SATP_MODE_SV32 1 +#define SATP_MODE_SV39 8 +#define SATP_MODE_SV48 9 +#define SATP_MODE_SV57 10 +#define SATP_MODE_SV64 11 + +#define HGATP32_MODE 0x80000000 +#define HGATP32_VMID 0x1FC00000 +#define HGATP32_PPN 0x003FFFFF + +#define HGATP64_MODE 0xF000000000000000 +#define HGATP64_VMID 0x03FFF00000000000 +#define HGATP64_PPN 0x00000FFFFFFFFFFF + +#define HGATP_MODE_OFF 0 +#define HGATP_MODE_SV32X4 1 +#define HGATP_MODE_SV39X4 8 +#define HGATP_MODE_SV48X4 9 +#define HGATP_MODE_SV57X4 10 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define IRQ_U_SOFT 0 +#define IRQ_S_SOFT 1 +#define IRQ_VS_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_U_TIMER 4 +#define IRQ_S_TIMER 5 +#define IRQ_VS_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_U_EXT 8 +#define IRQ_S_EXT 9 +#define IRQ_VS_EXT 10 +#define IRQ_M_EXT 11 +#define IRQ_S_GEXT 12 +#define IRQ_COP 12 +#define IRQ_HOST 13 + +/* page table entry (PTE) fields */ +#define PTE_V 0x001 /* Valid */ +#define PTE_R 0x002 /* Read */ +#define PTE_W 0x004 /* Write */ +#define PTE_X 0x008 /* Execute */ +#define PTE_U 0x010 /* User */ +#define PTE_G 0x020 /* Global */ +#define PTE_A 0x040 /* Accessed */ +#define PTE_D 0x080 /* Dirty */ +#define PTE_SOFT 0x300 /* Reserved for Software */ +#define PTE_RSVD 0x1FC0000000000000 /* Reserved for future standard use */ +#define PTE_PBMT 0x6000000000000000 /* Svpbmt: Page-based memory types */ +#define PTE_N 0x8000000000000000 /* Svnapot: 
NAPOT translation contiguity */ +#define PTE_ATTR 0xFFC0000000000000 /* All attributes and reserved bits */ + +#define PTE_PPN_SHIFT 10 + +#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V) + +#ifdef __riscv + +#if __riscv_xlen == 64 +# define MSTATUS_SD MSTATUS64_SD +# define SSTATUS_SD SSTATUS64_SD +# define RISCV_PGLEVEL_BITS 9 +# define SATP_MODE SATP64_MODE +#else +# define MSTATUS_SD MSTATUS32_SD +# define SSTATUS_SD SSTATUS32_SD +# define RISCV_PGLEVEL_BITS 10 +# define SATP_MODE SATP32_MODE +#endif +#define RISCV_PGSHIFT 12 +#define RISCV_PGSIZE (1 << RISCV_PGSHIFT) + +#ifndef __ASSEMBLER__ + +#ifdef __GNUC__ + +#define read_csr(reg) ({ unsigned long __tmp; \ + asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \ + __tmp; }) + +#define write_csr(reg, val) ({ \ + asm volatile ("csrw " #reg ", %0" :: "rK"(val)); }) + +#define swap_csr(reg, val) ({ unsigned long __tmp; \ + asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "rK"(val)); \ + __tmp; }) + +#define set_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define clear_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define rdtime() read_csr(time) +#define rdcycle() read_csr(cycle) +#define rdinstret() read_csr(instret) + +#endif + +#endif + +#endif + +#endif +/* Automatically generated by parse_opcodes. */ +#ifndef RISCV_ENCODING_H +#define RISCV_ENCODING_H +#define MATCH_SLLI_RV32 0x1013 +#define MASK_SLLI_RV32 0xfe00707f +#define MATCH_SRLI_RV32 0x5013 +#define MASK_SRLI_RV32 0xfe00707f +#define MATCH_SRAI_RV32 0x40005013 +#define MASK_SRAI_RV32 0xfe00707f +#define MATCH_FRFLAGS 0x102073 +#define MASK_FRFLAGS 0xfffff07f +#define MATCH_FSFLAGS 0x101073 +#define MASK_FSFLAGS 0xfff0707f +#define MATCH_FSFLAGSI 0x105073 +#define MASK_FSFLAGSI 0xfff0707f +#define MATCH_FRRM 0x202073 +#define MASK_FRRM 0xfffff07f +#define MATCH_FSRM 0x201073 +#define MASK_FSRM 0xfff0707f +#define MATCH_FSRMI 0x205073 +#define MASK_FSRMI 0xfff0707f +#define MATCH_FSCSR 0x301073 +#define MASK_FSCSR 0xfff0707f +#define MATCH_FRCSR 0x302073 +#define MASK_FRCSR 0xfffff07f +#define MATCH_RDCYCLE 0xc0002073 +#define MASK_RDCYCLE 0xfffff07f +#define MATCH_RDTIME 0xc0102073 +#define MASK_RDTIME 0xfffff07f +#define MATCH_RDINSTRET 0xc0202073 +#define MASK_RDINSTRET 0xfffff07f +#define MATCH_RDCYCLEH 0xc8002073 +#define MASK_RDCYCLEH 0xfffff07f +#define MATCH_RDTIMEH 0xc8102073 +#define MASK_RDTIMEH 0xfffff07f +#define MATCH_RDINSTRETH 0xc8202073 +#define MASK_RDINSTRETH 0xfffff07f +#define MATCH_SCALL 0x73 +#define MASK_SCALL 0xffffffff +#define MATCH_SBREAK 0x100073 +#define MASK_SBREAK 0xffffffff +#define MATCH_FMV_X_S 0xe0000053 +#define MASK_FMV_X_S 0xfff0707f +#define MATCH_FMV_S_X 0xf0000053 +#define MASK_FMV_S_X 0xfff0707f +#define MATCH_FENCE_TSO 0x8330000f +#define MASK_FENCE_TSO 0xfff0707f +#define MATCH_PAUSE 0x100000f +#define MASK_PAUSE 0xffffffff +#define MATCH_BEQ 0x63 +#define MASK_BEQ 0x707f +#define MATCH_BNE 0x1063 +#define MASK_BNE 0x707f +#define MATCH_BLT 0x4063 +#define MASK_BLT 0x707f +#define MATCH_BGE 0x5063 +#define MASK_BGE 0x707f +#define MATCH_BLTU 0x6063 +#define MASK_BLTU 0x707f +#define MATCH_BGEU 0x7063 +#define MASK_BGEU 0x707f +#define MATCH_JALR 0x67 +#define MASK_JALR 0x707f +#define MATCH_JAL 0x6f +#define MASK_JAL 0x7f +#define MATCH_LUI 0x37 +#define MASK_LUI 0x7f +#define MATCH_AUIPC 0x17 +#define MASK_AUIPC 0x7f +#define MATCH_ADDI 0x13 
+#define MASK_ADDI 0x707f +#define MATCH_SLTI 0x2013 +#define MASK_SLTI 0x707f +#define MATCH_SLTIU 0x3013 +#define MASK_SLTIU 0x707f +#define MATCH_XORI 0x4013 +#define MASK_XORI 0x707f +#define MATCH_ORI 0x6013 +#define MASK_ORI 0x707f +#define MATCH_ANDI 0x7013 +#define MASK_ANDI 0x707f +#define MATCH_ADD 0x33 +#define MASK_ADD 0xfe00707f +#define MATCH_SUB 0x40000033 +#define MASK_SUB 0xfe00707f +#define MATCH_SLL 0x1033 +#define MASK_SLL 0xfe00707f +#define MATCH_SLT 0x2033 +#define MASK_SLT 0xfe00707f +#define MATCH_SLTU 0x3033 +#define MASK_SLTU 0xfe00707f +#define MATCH_XOR 0x4033 +#define MASK_XOR 0xfe00707f +#define MATCH_SRL 0x5033 +#define MASK_SRL 0xfe00707f +#define MATCH_SRA 0x40005033 +#define MASK_SRA 0xfe00707f +#define MATCH_OR 0x6033 +#define MASK_OR 0xfe00707f +#define MATCH_AND 0x7033 +#define MASK_AND 0xfe00707f +#define MATCH_LB 0x3 +#define MASK_LB 0x707f +#define MATCH_LH 0x1003 +#define MASK_LH 0x707f +#define MATCH_LW 0x2003 +#define MASK_LW 0x707f +#define MATCH_LBU 0x4003 +#define MASK_LBU 0x707f +#define MATCH_LHU 0x5003 +#define MASK_LHU 0x707f +#define MATCH_SB 0x23 +#define MASK_SB 0x707f +#define MATCH_SH 0x1023 +#define MASK_SH 0x707f +#define MATCH_SW 0x2023 +#define MASK_SW 0x707f +#define MATCH_FENCE 0xf +#define MASK_FENCE 0x707f +#define MATCH_FENCE_I 0x100f +#define MASK_FENCE_I 0x707f +#define MATCH_ADDIW 0x1b +#define MASK_ADDIW 0x707f +#define MATCH_SLLIW 0x101b +#define MASK_SLLIW 0xfe00707f +#define MATCH_SRLIW 0x501b +#define MASK_SRLIW 0xfe00707f +#define MATCH_SRAIW 0x4000501b +#define MASK_SRAIW 0xfe00707f +#define MATCH_ADDW 0x3b +#define MASK_ADDW 0xfe00707f +#define MATCH_SUBW 0x4000003b +#define MASK_SUBW 0xfe00707f +#define MATCH_SLLW 0x103b +#define MASK_SLLW 0xfe00707f +#define MATCH_SRLW 0x503b +#define MASK_SRLW 0xfe00707f +#define MATCH_SRAW 0x4000503b +#define MASK_SRAW 0xfe00707f +#define MATCH_LD 0x3003 +#define MASK_LD 0x707f +#define MATCH_LWU 0x6003 +#define MASK_LWU 0x707f +#define MATCH_SD 0x3023 +#define MASK_SD 0x707f +#define MATCH_SLLI 0x1013 +#define MASK_SLLI 0xfc00707f +#define MATCH_SRLI 0x5013 +#define MASK_SRLI 0xfc00707f +#define MATCH_SRAI 0x40005013 +#define MASK_SRAI 0xfc00707f +#define MATCH_MUL 0x2000033 +#define MASK_MUL 0xfe00707f +#define MATCH_MULH 0x2001033 +#define MASK_MULH 0xfe00707f +#define MATCH_MULHSU 0x2002033 +#define MASK_MULHSU 0xfe00707f +#define MATCH_MULHU 0x2003033 +#define MASK_MULHU 0xfe00707f +#define MATCH_DIV 0x2004033 +#define MASK_DIV 0xfe00707f +#define MATCH_DIVU 0x2005033 +#define MASK_DIVU 0xfe00707f +#define MATCH_REM 0x2006033 +#define MASK_REM 0xfe00707f +#define MATCH_REMU 0x2007033 +#define MASK_REMU 0xfe00707f +#define MATCH_MULW 0x200003b +#define MASK_MULW 0xfe00707f +#define MATCH_DIVW 0x200403b +#define MASK_DIVW 0xfe00707f +#define MATCH_DIVUW 0x200503b +#define MASK_DIVUW 0xfe00707f +#define MATCH_REMW 0x200603b +#define MASK_REMW 0xfe00707f +#define MATCH_REMUW 0x200703b +#define MASK_REMUW 0xfe00707f +#define MATCH_AMOADD_W 0x202f +#define MASK_AMOADD_W 0xf800707f +#define MATCH_AMOXOR_W 0x2000202f +#define MASK_AMOXOR_W 0xf800707f +#define MATCH_AMOOR_W 0x4000202f +#define MASK_AMOOR_W 0xf800707f +#define MATCH_AMOAND_W 0x6000202f +#define MASK_AMOAND_W 0xf800707f +#define MATCH_AMOMIN_W 0x8000202f +#define MASK_AMOMIN_W 0xf800707f +#define MATCH_AMOMAX_W 0xa000202f +#define MASK_AMOMAX_W 0xf800707f +#define MATCH_AMOMINU_W 0xc000202f +#define MASK_AMOMINU_W 0xf800707f +#define MATCH_AMOMAXU_W 0xe000202f +#define MASK_AMOMAXU_W 0xf800707f +#define 
MATCH_AMOSWAP_W 0x800202f +#define MASK_AMOSWAP_W 0xf800707f +#define MATCH_LR_W 0x1000202f +#define MASK_LR_W 0xf9f0707f +#define MATCH_SC_W 0x1800202f +#define MASK_SC_W 0xf800707f +#define MATCH_AMOADD_D 0x302f +#define MASK_AMOADD_D 0xf800707f +#define MATCH_AMOXOR_D 0x2000302f +#define MASK_AMOXOR_D 0xf800707f +#define MATCH_AMOOR_D 0x4000302f +#define MASK_AMOOR_D 0xf800707f +#define MATCH_AMOAND_D 0x6000302f +#define MASK_AMOAND_D 0xf800707f +#define MATCH_AMOMIN_D 0x8000302f +#define MASK_AMOMIN_D 0xf800707f +#define MATCH_AMOMAX_D 0xa000302f +#define MASK_AMOMAX_D 0xf800707f +#define MATCH_AMOMINU_D 0xc000302f +#define MASK_AMOMINU_D 0xf800707f +#define MATCH_AMOMAXU_D 0xe000302f +#define MASK_AMOMAXU_D 0xf800707f +#define MATCH_AMOSWAP_D 0x800302f +#define MASK_AMOSWAP_D 0xf800707f +#define MATCH_LR_D 0x1000302f +#define MASK_LR_D 0xf9f0707f +#define MATCH_SC_D 0x1800302f +#define MASK_SC_D 0xf800707f +#define MATCH_HFENCE_VVMA 0x22000073 +#define MASK_HFENCE_VVMA 0xfe007fff +#define MATCH_HFENCE_GVMA 0x62000073 +#define MASK_HFENCE_GVMA 0xfe007fff +#define MATCH_HLV_B 0x60004073 +#define MASK_HLV_B 0xfff0707f +#define MATCH_HLV_BU 0x60104073 +#define MASK_HLV_BU 0xfff0707f +#define MATCH_HLV_H 0x64004073 +#define MASK_HLV_H 0xfff0707f +#define MATCH_HLV_HU 0x64104073 +#define MASK_HLV_HU 0xfff0707f +#define MATCH_HLVX_HU 0x64304073 +#define MASK_HLVX_HU 0xfff0707f +#define MATCH_HLV_W 0x68004073 +#define MASK_HLV_W 0xfff0707f +#define MATCH_HLVX_WU 0x68304073 +#define MASK_HLVX_WU 0xfff0707f +#define MATCH_HSV_B 0x62004073 +#define MASK_HSV_B 0xfe007fff +#define MATCH_HSV_H 0x66004073 +#define MASK_HSV_H 0xfe007fff +#define MATCH_HSV_W 0x6a004073 +#define MASK_HSV_W 0xfe007fff +#define MATCH_HLV_WU 0x68104073 +#define MASK_HLV_WU 0xfff0707f +#define MATCH_HLV_D 0x6c004073 +#define MASK_HLV_D 0xfff0707f +#define MATCH_HSV_D 0x6e004073 +#define MASK_HSV_D 0xfe007fff +#define MATCH_FADD_S 0x53 +#define MASK_FADD_S 0xfe00007f +#define MATCH_FSUB_S 0x8000053 +#define MASK_FSUB_S 0xfe00007f +#define MATCH_FMUL_S 0x10000053 +#define MASK_FMUL_S 0xfe00007f +#define MATCH_FDIV_S 0x18000053 +#define MASK_FDIV_S 0xfe00007f +#define MATCH_FSGNJ_S 0x20000053 +#define MASK_FSGNJ_S 0xfe00707f +#define MATCH_FSGNJN_S 0x20001053 +#define MASK_FSGNJN_S 0xfe00707f +#define MATCH_FSGNJX_S 0x20002053 +#define MASK_FSGNJX_S 0xfe00707f +#define MATCH_FMIN_S 0x28000053 +#define MASK_FMIN_S 0xfe00707f +#define MATCH_FMAX_S 0x28001053 +#define MASK_FMAX_S 0xfe00707f +#define MATCH_FSQRT_S 0x58000053 +#define MASK_FSQRT_S 0xfff0007f +#define MATCH_FLE_S 0xa0000053 +#define MASK_FLE_S 0xfe00707f +#define MATCH_FLT_S 0xa0001053 +#define MASK_FLT_S 0xfe00707f +#define MATCH_FEQ_S 0xa0002053 +#define MASK_FEQ_S 0xfe00707f +#define MATCH_FCVT_W_S 0xc0000053 +#define MASK_FCVT_W_S 0xfff0007f +#define MATCH_FCVT_WU_S 0xc0100053 +#define MASK_FCVT_WU_S 0xfff0007f +#define MATCH_FMV_X_W 0xe0000053 +#define MASK_FMV_X_W 0xfff0707f +#define MATCH_FCLASS_S 0xe0001053 +#define MASK_FCLASS_S 0xfff0707f +#define MATCH_FCVT_S_W 0xd0000053 +#define MASK_FCVT_S_W 0xfff0007f +#define MATCH_FCVT_S_WU 0xd0100053 +#define MASK_FCVT_S_WU 0xfff0007f +#define MATCH_FMV_W_X 0xf0000053 +#define MASK_FMV_W_X 0xfff0707f +#define MATCH_FLW 0x2007 +#define MASK_FLW 0x707f +#define MATCH_FSW 0x2027 +#define MASK_FSW 0x707f +#define MATCH_FMADD_S 0x43 +#define MASK_FMADD_S 0x600007f +#define MATCH_FMSUB_S 0x47 +#define MASK_FMSUB_S 0x600007f +#define MATCH_FNMSUB_S 0x4b +#define MASK_FNMSUB_S 0x600007f +#define MATCH_FNMADD_S 0x4f 
+#define MASK_FNMADD_S 0x600007f +#define MATCH_FCVT_L_S 0xc0200053 +#define MASK_FCVT_L_S 0xfff0007f +#define MATCH_FCVT_LU_S 0xc0300053 +#define MASK_FCVT_LU_S 0xfff0007f +#define MATCH_FCVT_S_L 0xd0200053 +#define MASK_FCVT_S_L 0xfff0007f +#define MATCH_FCVT_S_LU 0xd0300053 +#define MASK_FCVT_S_LU 0xfff0007f +#define MATCH_FADD_D 0x2000053 +#define MASK_FADD_D 0xfe00007f +#define MATCH_FSUB_D 0xa000053 +#define MASK_FSUB_D 0xfe00007f +#define MATCH_FMUL_D 0x12000053 +#define MASK_FMUL_D 0xfe00007f +#define MATCH_FDIV_D 0x1a000053 +#define MASK_FDIV_D 0xfe00007f +#define MATCH_FSGNJ_D 0x22000053 +#define MASK_FSGNJ_D 0xfe00707f +#define MATCH_FSGNJN_D 0x22001053 +#define MASK_FSGNJN_D 0xfe00707f +#define MATCH_FSGNJX_D 0x22002053 +#define MASK_FSGNJX_D 0xfe00707f +#define MATCH_FMIN_D 0x2a000053 +#define MASK_FMIN_D 0xfe00707f +#define MATCH_FMAX_D 0x2a001053 +#define MASK_FMAX_D 0xfe00707f +#define MATCH_FCVT_S_D 0x40100053 +#define MASK_FCVT_S_D 0xfff0007f +#define MATCH_FCVT_D_S 0x42000053 +#define MASK_FCVT_D_S 0xfff0007f +#define MATCH_FSQRT_D 0x5a000053 +#define MASK_FSQRT_D 0xfff0007f +#define MATCH_FLE_D 0xa2000053 +#define MASK_FLE_D 0xfe00707f +#define MATCH_FLT_D 0xa2001053 +#define MASK_FLT_D 0xfe00707f +#define MATCH_FEQ_D 0xa2002053 +#define MASK_FEQ_D 0xfe00707f +#define MATCH_FCVT_W_D 0xc2000053 +#define MASK_FCVT_W_D 0xfff0007f +#define MATCH_FCVT_WU_D 0xc2100053 +#define MASK_FCVT_WU_D 0xfff0007f +#define MATCH_FCLASS_D 0xe2001053 +#define MASK_FCLASS_D 0xfff0707f +#define MATCH_FCVT_D_W 0xd2000053 +#define MASK_FCVT_D_W 0xfff0007f +#define MATCH_FCVT_D_WU 0xd2100053 +#define MASK_FCVT_D_WU 0xfff0007f +#define MATCH_FLD 0x3007 +#define MASK_FLD 0x707f +#define MATCH_FSD 0x3027 +#define MASK_FSD 0x707f +#define MATCH_FMADD_D 0x2000043 +#define MASK_FMADD_D 0x600007f +#define MATCH_FMSUB_D 0x2000047 +#define MASK_FMSUB_D 0x600007f +#define MATCH_FNMSUB_D 0x200004b +#define MASK_FNMSUB_D 0x600007f +#define MATCH_FNMADD_D 0x200004f +#define MASK_FNMADD_D 0x600007f +#define MATCH_FCVT_L_D 0xc2200053 +#define MASK_FCVT_L_D 0xfff0007f +#define MATCH_FCVT_LU_D 0xc2300053 +#define MASK_FCVT_LU_D 0xfff0007f +#define MATCH_FMV_X_D 0xe2000053 +#define MASK_FMV_X_D 0xfff0707f +#define MATCH_FCVT_D_L 0xd2200053 +#define MASK_FCVT_D_L 0xfff0007f +#define MATCH_FCVT_D_LU 0xd2300053 +#define MASK_FCVT_D_LU 0xfff0007f +#define MATCH_FMV_D_X 0xf2000053 +#define MASK_FMV_D_X 0xfff0707f +#define MATCH_FADD_Q 0x6000053 +#define MASK_FADD_Q 0xfe00007f +#define MATCH_FSUB_Q 0xe000053 +#define MASK_FSUB_Q 0xfe00007f +#define MATCH_FMUL_Q 0x16000053 +#define MASK_FMUL_Q 0xfe00007f +#define MATCH_FDIV_Q 0x1e000053 +#define MASK_FDIV_Q 0xfe00007f +#define MATCH_FSGNJ_Q 0x26000053 +#define MASK_FSGNJ_Q 0xfe00707f +#define MATCH_FSGNJN_Q 0x26001053 +#define MASK_FSGNJN_Q 0xfe00707f +#define MATCH_FSGNJX_Q 0x26002053 +#define MASK_FSGNJX_Q 0xfe00707f +#define MATCH_FMIN_Q 0x2e000053 +#define MASK_FMIN_Q 0xfe00707f +#define MATCH_FMAX_Q 0x2e001053 +#define MASK_FMAX_Q 0xfe00707f +#define MATCH_FCVT_S_Q 0x40300053 +#define MASK_FCVT_S_Q 0xfff0007f +#define MATCH_FCVT_Q_S 0x46000053 +#define MASK_FCVT_Q_S 0xfff0007f +#define MATCH_FCVT_D_Q 0x42300053 +#define MASK_FCVT_D_Q 0xfff0007f +#define MATCH_FCVT_Q_D 0x46100053 +#define MASK_FCVT_Q_D 0xfff0007f +#define MATCH_FSQRT_Q 0x5e000053 +#define MASK_FSQRT_Q 0xfff0007f +#define MATCH_FLE_Q 0xa6000053 +#define MASK_FLE_Q 0xfe00707f +#define MATCH_FLT_Q 0xa6001053 +#define MASK_FLT_Q 0xfe00707f +#define MATCH_FEQ_Q 0xa6002053 +#define MASK_FEQ_Q 0xfe00707f 
+#define MATCH_FCVT_W_Q 0xc6000053 +#define MASK_FCVT_W_Q 0xfff0007f +#define MATCH_FCVT_WU_Q 0xc6100053 +#define MASK_FCVT_WU_Q 0xfff0007f +#define MATCH_FCLASS_Q 0xe6001053 +#define MASK_FCLASS_Q 0xfff0707f +#define MATCH_FCVT_Q_W 0xd6000053 +#define MASK_FCVT_Q_W 0xfff0007f +#define MATCH_FCVT_Q_WU 0xd6100053 +#define MASK_FCVT_Q_WU 0xfff0007f +#define MATCH_FLQ 0x4007 +#define MASK_FLQ 0x707f +#define MATCH_FSQ 0x4027 +#define MASK_FSQ 0x707f +#define MATCH_FMADD_Q 0x6000043 +#define MASK_FMADD_Q 0x600007f +#define MATCH_FMSUB_Q 0x6000047 +#define MASK_FMSUB_Q 0x600007f +#define MATCH_FNMSUB_Q 0x600004b +#define MASK_FNMSUB_Q 0x600007f +#define MATCH_FNMADD_Q 0x600004f +#define MASK_FNMADD_Q 0x600007f +#define MATCH_FCVT_L_Q 0xc6200053 +#define MASK_FCVT_L_Q 0xfff0007f +#define MATCH_FCVT_LU_Q 0xc6300053 +#define MASK_FCVT_LU_Q 0xfff0007f +#define MATCH_FCVT_Q_L 0xd6200053 +#define MASK_FCVT_Q_L 0xfff0007f +#define MATCH_FCVT_Q_LU 0xd6300053 +#define MASK_FCVT_Q_LU 0xfff0007f +#define MATCH_ANDN 0x40007033 +#define MASK_ANDN 0xfe00707f +#define MATCH_ORN 0x40006033 +#define MASK_ORN 0xfe00707f +#define MATCH_XNOR 0x40004033 +#define MASK_XNOR 0xfe00707f +#define MATCH_SLO 0x20001033 +#define MASK_SLO 0xfe00707f +#define MATCH_SRO 0x20005033 +#define MASK_SRO 0xfe00707f +#define MATCH_ROL 0x60001033 +#define MASK_ROL 0xfe00707f +#define MATCH_ROR 0x60005033 +#define MASK_ROR 0xfe00707f +#define MATCH_BCLR 0x48001033 +#define MASK_BCLR 0xfe00707f +#define MATCH_BSET 0x28001033 +#define MASK_BSET 0xfe00707f +#define MATCH_BINV 0x68001033 +#define MASK_BINV 0xfe00707f +#define MATCH_BEXT 0x48005033 +#define MASK_BEXT 0xfe00707f +#define MATCH_GORC 0x28005033 +#define MASK_GORC 0xfe00707f +#define MATCH_GREV 0x68005033 +#define MASK_GREV 0xfe00707f +#define MATCH_SLOI 0x20001013 +#define MASK_SLOI 0xfc00707f +#define MATCH_SROI 0x20005013 +#define MASK_SROI 0xfc00707f +#define MATCH_RORI 0x60005013 +#define MASK_RORI 0xfc00707f +#define MATCH_BCLRI 0x48001013 +#define MASK_BCLRI 0xfc00707f +#define MATCH_BSETI 0x28001013 +#define MASK_BSETI 0xfc00707f +#define MATCH_BINVI 0x68001013 +#define MASK_BINVI 0xfc00707f +#define MATCH_BEXTI 0x48005013 +#define MASK_BEXTI 0xfc00707f +#define MATCH_GORCI 0x28005013 +#define MASK_GORCI 0xfc00707f +#define MATCH_GREVI 0x68005013 +#define MASK_GREVI 0xfc00707f +#define MATCH_CMIX 0x6001033 +#define MASK_CMIX 0x600707f +#define MATCH_CMOV 0x6005033 +#define MASK_CMOV 0x600707f +#define MATCH_FSL 0x4001033 +#define MASK_FSL 0x600707f +#define MATCH_FSR 0x4005033 +#define MASK_FSR 0x600707f +#define MATCH_FSRI 0x4005013 +#define MASK_FSRI 0x400707f +#define MATCH_CLZ 0x60001013 +#define MASK_CLZ 0xfff0707f +#define MATCH_CTZ 0x60101013 +#define MASK_CTZ 0xfff0707f +#define MATCH_CPOP 0x60201013 +#define MASK_CPOP 0xfff0707f +#define MATCH_SEXT_B 0x60401013 +#define MASK_SEXT_B 0xfff0707f +#define MATCH_SEXT_H 0x60501013 +#define MASK_SEXT_H 0xfff0707f +#define MATCH_CRC32_B 0x61001013 +#define MASK_CRC32_B 0xfff0707f +#define MATCH_CRC32_H 0x61101013 +#define MASK_CRC32_H 0xfff0707f +#define MATCH_CRC32_W 0x61201013 +#define MASK_CRC32_W 0xfff0707f +#define MATCH_CRC32C_B 0x61801013 +#define MASK_CRC32C_B 0xfff0707f +#define MATCH_CRC32C_H 0x61901013 +#define MASK_CRC32C_H 0xfff0707f +#define MATCH_CRC32C_W 0x61a01013 +#define MASK_CRC32C_W 0xfff0707f +#define MATCH_SH1ADD 0x20002033 +#define MASK_SH1ADD 0xfe00707f +#define MATCH_SH2ADD 0x20004033 +#define MASK_SH2ADD 0xfe00707f +#define MATCH_SH3ADD 0x20006033 +#define MASK_SH3ADD 0xfe00707f +#define 
MATCH_CLMUL 0xa001033 +#define MASK_CLMUL 0xfe00707f +#define MATCH_CLMULR 0xa002033 +#define MASK_CLMULR 0xfe00707f +#define MATCH_CLMULH 0xa003033 +#define MASK_CLMULH 0xfe00707f +#define MATCH_MIN 0xa004033 +#define MASK_MIN 0xfe00707f +#define MATCH_MINU 0xa005033 +#define MASK_MINU 0xfe00707f +#define MATCH_MAX 0xa006033 +#define MASK_MAX 0xfe00707f +#define MATCH_MAXU 0xa007033 +#define MASK_MAXU 0xfe00707f +#define MATCH_SHFL 0x8001033 +#define MASK_SHFL 0xfe00707f +#define MATCH_UNSHFL 0x8005033 +#define MASK_UNSHFL 0xfe00707f +#define MATCH_BCOMPRESS 0x8006033 +#define MASK_BCOMPRESS 0xfe00707f +#define MATCH_BDECOMPRESS 0x48006033 +#define MASK_BDECOMPRESS 0xfe00707f +#define MATCH_PACK 0x8004033 +#define MASK_PACK 0xfe00707f +#define MATCH_PACKU 0x48004033 +#define MASK_PACKU 0xfe00707f +#define MATCH_PACKH 0x8007033 +#define MASK_PACKH 0xfe00707f +#define MATCH_BFP 0x48007033 +#define MASK_BFP 0xfe00707f +#define MATCH_SHFLI 0x8001013 +#define MASK_SHFLI 0xfe00707f +#define MATCH_UNSHFLI 0x8005013 +#define MASK_UNSHFLI 0xfe00707f +#define MATCH_XPERM4 0x28002033 +#define MASK_XPERM4 0xfe00707f +#define MATCH_XPERM8 0x28004033 +#define MASK_XPERM8 0xfe00707f +#define MATCH_XPERM16 0x28006033 +#define MASK_XPERM16 0xfe00707f +#define MATCH_BMATFLIP 0x60301013 +#define MASK_BMATFLIP 0xfff0707f +#define MATCH_CRC32_D 0x61301013 +#define MASK_CRC32_D 0xfff0707f +#define MATCH_CRC32C_D 0x61b01013 +#define MASK_CRC32C_D 0xfff0707f +#define MATCH_BMATOR 0x8003033 +#define MASK_BMATOR 0xfe00707f +#define MATCH_BMATXOR 0x48003033 +#define MASK_BMATXOR 0xfe00707f +#define MATCH_SLLI_UW 0x800101b +#define MASK_SLLI_UW 0xfc00707f +#define MATCH_ADD_UW 0x800003b +#define MASK_ADD_UW 0xfe00707f +#define MATCH_SLOW 0x2000103b +#define MASK_SLOW 0xfe00707f +#define MATCH_SROW 0x2000503b +#define MASK_SROW 0xfe00707f +#define MATCH_ROLW 0x6000103b +#define MASK_ROLW 0xfe00707f +#define MATCH_RORW 0x6000503b +#define MASK_RORW 0xfe00707f +#define MATCH_GORCW 0x2800503b +#define MASK_GORCW 0xfe00707f +#define MATCH_GREVW 0x6800503b +#define MASK_GREVW 0xfe00707f +#define MATCH_SLOIW 0x2000101b +#define MASK_SLOIW 0xfe00707f +#define MATCH_SROIW 0x2000501b +#define MASK_SROIW 0xfe00707f +#define MATCH_RORIW 0x6000501b +#define MASK_RORIW 0xfe00707f +#define MATCH_GORCIW 0x2800501b +#define MASK_GORCIW 0xfe00707f +#define MATCH_GREVIW 0x6800501b +#define MASK_GREVIW 0xfe00707f +#define MATCH_FSLW 0x400103b +#define MASK_FSLW 0x600707f +#define MATCH_FSRW 0x400503b +#define MASK_FSRW 0x600707f +#define MATCH_FSRIW 0x400501b +#define MASK_FSRIW 0x600707f +#define MATCH_CLZW 0x6000101b +#define MASK_CLZW 0xfff0707f +#define MATCH_CTZW 0x6010101b +#define MASK_CTZW 0xfff0707f +#define MATCH_CPOPW 0x6020101b +#define MASK_CPOPW 0xfff0707f +#define MATCH_SH1ADD_UW 0x2000203b +#define MASK_SH1ADD_UW 0xfe00707f +#define MATCH_SH2ADD_UW 0x2000403b +#define MASK_SH2ADD_UW 0xfe00707f +#define MATCH_SH3ADD_UW 0x2000603b +#define MASK_SH3ADD_UW 0xfe00707f +#define MATCH_SHFLW 0x800103b +#define MASK_SHFLW 0xfe00707f +#define MATCH_UNSHFLW 0x800503b +#define MASK_UNSHFLW 0xfe00707f +#define MATCH_BCOMPRESSW 0x800603b +#define MASK_BCOMPRESSW 0xfe00707f +#define MATCH_BDECOMPRESSW 0x4800603b +#define MASK_BDECOMPRESSW 0xfe00707f +#define MATCH_PACKW 0x800403b +#define MASK_PACKW 0xfe00707f +#define MATCH_PACKUW 0x4800403b +#define MASK_PACKUW 0xfe00707f +#define MATCH_BFPW 0x4800703b +#define MASK_BFPW 0xfe00707f +#define MATCH_XPERM32 0x28000033 +#define MASK_XPERM32 0xfe00707f +#define MATCH_ECALL 0x73 +#define 
MASK_ECALL 0xffffffff +#define MATCH_EBREAK 0x100073 +#define MASK_EBREAK 0xffffffff +#define MATCH_SRET 0x10200073 +#define MASK_SRET 0xffffffff +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff +#define MATCH_DRET 0x7b200073 +#define MASK_DRET 0xffffffff +#define MATCH_SFENCE_VMA 0x12000073 +#define MASK_SFENCE_VMA 0xfe007fff +#define MATCH_WFI 0x10500073 +#define MASK_WFI 0xffffffff +#define MATCH_CSRRW 0x1073 +#define MASK_CSRRW 0x707f +#define MATCH_CSRRS 0x2073 +#define MASK_CSRRS 0x707f +#define MATCH_CSRRC 0x3073 +#define MASK_CSRRC 0x707f +#define MATCH_CSRRWI 0x5073 +#define MASK_CSRRWI 0x707f +#define MATCH_CSRRSI 0x6073 +#define MASK_CSRRSI 0x707f +#define MATCH_CSRRCI 0x7073 +#define MASK_CSRRCI 0x707f +#define MATCH_SINVAL_VMA 0x16000073 +#define MASK_SINVAL_VMA 0xfe007fff +#define MATCH_SFENCE_W_INVAL 0x18000073 +#define MASK_SFENCE_W_INVAL 0xffffffff +#define MATCH_SFENCE_INVAL_IR 0x18100073 +#define MASK_SFENCE_INVAL_IR 0xffffffff +#define MATCH_HINVAL_VVMA 0x26000073 +#define MASK_HINVAL_VVMA 0xfe007fff +#define MATCH_HINVAL_GVMA 0x66000073 +#define MASK_HINVAL_GVMA 0xfe007fff +#define MATCH_FADD_H 0x4000053 +#define MASK_FADD_H 0xfe00007f +#define MATCH_FSUB_H 0xc000053 +#define MASK_FSUB_H 0xfe00007f +#define MATCH_FMUL_H 0x14000053 +#define MASK_FMUL_H 0xfe00007f +#define MATCH_FDIV_H 0x1c000053 +#define MASK_FDIV_H 0xfe00007f +#define MATCH_FSGNJ_H 0x24000053 +#define MASK_FSGNJ_H 0xfe00707f +#define MATCH_FSGNJN_H 0x24001053 +#define MASK_FSGNJN_H 0xfe00707f +#define MATCH_FSGNJX_H 0x24002053 +#define MASK_FSGNJX_H 0xfe00707f +#define MATCH_FMIN_H 0x2c000053 +#define MASK_FMIN_H 0xfe00707f +#define MATCH_FMAX_H 0x2c001053 +#define MASK_FMAX_H 0xfe00707f +#define MATCH_FCVT_H_S 0x44000053 +#define MASK_FCVT_H_S 0xfff0007f +#define MATCH_FCVT_S_H 0x40200053 +#define MASK_FCVT_S_H 0xfff0007f +#define MATCH_FSQRT_H 0x5c000053 +#define MASK_FSQRT_H 0xfff0007f +#define MATCH_FLE_H 0xa4000053 +#define MASK_FLE_H 0xfe00707f +#define MATCH_FLT_H 0xa4001053 +#define MASK_FLT_H 0xfe00707f +#define MATCH_FEQ_H 0xa4002053 +#define MASK_FEQ_H 0xfe00707f +#define MATCH_FCVT_W_H 0xc4000053 +#define MASK_FCVT_W_H 0xfff0007f +#define MATCH_FCVT_WU_H 0xc4100053 +#define MASK_FCVT_WU_H 0xfff0007f +#define MATCH_FMV_X_H 0xe4000053 +#define MASK_FMV_X_H 0xfff0707f +#define MATCH_FCLASS_H 0xe4001053 +#define MASK_FCLASS_H 0xfff0707f +#define MATCH_FCVT_H_W 0xd4000053 +#define MASK_FCVT_H_W 0xfff0007f +#define MATCH_FCVT_H_WU 0xd4100053 +#define MASK_FCVT_H_WU 0xfff0007f +#define MATCH_FMV_H_X 0xf4000053 +#define MASK_FMV_H_X 0xfff0707f +#define MATCH_FLH 0x1007 +#define MASK_FLH 0x707f +#define MATCH_FSH 0x1027 +#define MASK_FSH 0x707f +#define MATCH_FMADD_H 0x4000043 +#define MASK_FMADD_H 0x600007f +#define MATCH_FMSUB_H 0x4000047 +#define MASK_FMSUB_H 0x600007f +#define MATCH_FNMSUB_H 0x400004b +#define MASK_FNMSUB_H 0x600007f +#define MATCH_FNMADD_H 0x400004f +#define MASK_FNMADD_H 0x600007f +#define MATCH_FCVT_H_D 0x44100053 +#define MASK_FCVT_H_D 0xfff0007f +#define MATCH_FCVT_D_H 0x42200053 +#define MASK_FCVT_D_H 0xfff0007f +#define MATCH_FCVT_H_Q 0x44300053 +#define MASK_FCVT_H_Q 0xfff0007f +#define MATCH_FCVT_Q_H 0x46200053 +#define MASK_FCVT_Q_H 0xfff0007f +#define MATCH_FCVT_L_H 0xc4200053 +#define MASK_FCVT_L_H 0xfff0007f +#define MATCH_FCVT_LU_H 0xc4300053 +#define MASK_FCVT_LU_H 0xfff0007f +#define MATCH_FCVT_H_L 0xd4200053 +#define MASK_FCVT_H_L 0xfff0007f +#define MATCH_FCVT_H_LU 0xd4300053 +#define MASK_FCVT_H_LU 0xfff0007f +#define MATCH_SM4ED 0x30000033 
+#define MASK_SM4ED 0x3e00707f +#define MATCH_SM4KS 0x34000033 +#define MASK_SM4KS 0x3e00707f +#define MATCH_SM3P0 0x10801013 +#define MASK_SM3P0 0xfff0707f +#define MATCH_SM3P1 0x10901013 +#define MASK_SM3P1 0xfff0707f +#define MATCH_SHA256SUM0 0x10001013 +#define MASK_SHA256SUM0 0xfff0707f +#define MATCH_SHA256SUM1 0x10101013 +#define MASK_SHA256SUM1 0xfff0707f +#define MATCH_SHA256SIG0 0x10201013 +#define MASK_SHA256SIG0 0xfff0707f +#define MATCH_SHA256SIG1 0x10301013 +#define MASK_SHA256SIG1 0xfff0707f +#define MATCH_AES32ESMI 0x26000033 +#define MASK_AES32ESMI 0x3e00707f +#define MATCH_AES32ESI 0x22000033 +#define MASK_AES32ESI 0x3e00707f +#define MATCH_AES32DSMI 0x2e000033 +#define MASK_AES32DSMI 0x3e00707f +#define MATCH_AES32DSI 0x2a000033 +#define MASK_AES32DSI 0x3e00707f +#define MATCH_SHA512SUM0R 0x50000033 +#define MASK_SHA512SUM0R 0xfe00707f +#define MATCH_SHA512SUM1R 0x52000033 +#define MASK_SHA512SUM1R 0xfe00707f +#define MATCH_SHA512SIG0L 0x54000033 +#define MASK_SHA512SIG0L 0xfe00707f +#define MATCH_SHA512SIG0H 0x5c000033 +#define MASK_SHA512SIG0H 0xfe00707f +#define MATCH_SHA512SIG1L 0x56000033 +#define MASK_SHA512SIG1L 0xfe00707f +#define MATCH_SHA512SIG1H 0x5e000033 +#define MASK_SHA512SIG1H 0xfe00707f +#define MATCH_AES64KS1I 0x31001013 +#define MASK_AES64KS1I 0xff00707f +#define MATCH_AES64IM 0x30001013 +#define MASK_AES64IM 0xfff0707f +#define MATCH_AES64KS2 0x7e000033 +#define MASK_AES64KS2 0xfe00707f +#define MATCH_AES64ESM 0x36000033 +#define MASK_AES64ESM 0xfe00707f +#define MATCH_AES64ES 0x32000033 +#define MASK_AES64ES 0xfe00707f +#define MATCH_AES64DSM 0x3e000033 +#define MASK_AES64DSM 0xfe00707f +#define MATCH_AES64DS 0x3a000033 +#define MASK_AES64DS 0xfe00707f +#define MATCH_SHA512SUM0 0x10401013 +#define MASK_SHA512SUM0 0xfff0707f +#define MATCH_SHA512SUM1 0x10501013 +#define MASK_SHA512SUM1 0xfff0707f +#define MATCH_SHA512SIG0 0x10601013 +#define MASK_SHA512SIG0 0xfff0707f +#define MATCH_SHA512SIG1 0x10701013 +#define MASK_SHA512SIG1 0xfff0707f +#define MATCH_CBO_CLEAN 0x10200f +#define MASK_CBO_CLEAN 0xfff07fff +#define MATCH_CBO_FLUSH 0x20200f +#define MASK_CBO_FLUSH 0xfff07fff +#define MATCH_CBO_INVAL 0x200f +#define MASK_CBO_INVAL 0xfff07fff +#define MATCH_CBO_ZERO 0x40200f +#define MASK_CBO_ZERO 0xfff07fff +#define MATCH_PREFETCH_I 0x6013 +#define MASK_PREFETCH_I 0x1f07fff +#define MATCH_PREFETCH_R 0x106013 +#define MASK_PREFETCH_R 0x1f07fff +#define MATCH_PREFETCH_W 0x306013 +#define MASK_PREFETCH_W 0x1f07fff +#define MATCH_C_NOP 0x1 +#define MASK_C_NOP 0xffff +#define MATCH_C_ADDI16SP 0x6101 +#define MASK_C_ADDI16SP 0xef83 +#define MATCH_C_JR 0x8002 +#define MASK_C_JR 0xf07f +#define MATCH_C_JALR 0x9002 +#define MASK_C_JALR 0xf07f +#define MATCH_C_EBREAK 0x9002 +#define MASK_C_EBREAK 0xffff +#define MATCH_C_ADDI4SPN 0x0 +#define MASK_C_ADDI4SPN 0xe003 +#define MATCH_C_FLD 0x2000 +#define MASK_C_FLD 0xe003 +#define MATCH_C_LW 0x4000 +#define MASK_C_LW 0xe003 +#define MATCH_C_FLW 0x6000 +#define MASK_C_FLW 0xe003 +#define MATCH_C_FSD 0xa000 +#define MASK_C_FSD 0xe003 +#define MATCH_C_SW 0xc000 +#define MASK_C_SW 0xe003 +#define MATCH_C_FSW 0xe000 +#define MASK_C_FSW 0xe003 +#define MATCH_C_ADDI 0x1 +#define MASK_C_ADDI 0xe003 +#define MATCH_C_JAL 0x2001 +#define MASK_C_JAL 0xe003 +#define MATCH_C_LI 0x4001 +#define MASK_C_LI 0xe003 +#define MATCH_C_LUI 0x6001 +#define MASK_C_LUI 0xe003 +#define MATCH_C_SRLI 0x8001 +#define MASK_C_SRLI 0xec03 +#define MATCH_C_SRAI 0x8401 +#define MASK_C_SRAI 0xec03 +#define MATCH_C_ANDI 0x8801 +#define MASK_C_ANDI 
0xec03 +#define MATCH_C_SUB 0x8c01 +#define MASK_C_SUB 0xfc63 +#define MATCH_C_XOR 0x8c21 +#define MASK_C_XOR 0xfc63 +#define MATCH_C_OR 0x8c41 +#define MASK_C_OR 0xfc63 +#define MATCH_C_AND 0x8c61 +#define MASK_C_AND 0xfc63 +#define MATCH_C_J 0xa001 +#define MASK_C_J 0xe003 +#define MATCH_C_BEQZ 0xc001 +#define MASK_C_BEQZ 0xe003 +#define MATCH_C_BNEZ 0xe001 +#define MASK_C_BNEZ 0xe003 +#define MATCH_C_SLLI 0x2 +#define MASK_C_SLLI 0xe003 +#define MATCH_C_FLDSP 0x2002 +#define MASK_C_FLDSP 0xe003 +#define MATCH_C_LWSP 0x4002 +#define MASK_C_LWSP 0xe003 +#define MATCH_C_FLWSP 0x6002 +#define MASK_C_FLWSP 0xe003 +#define MATCH_C_MV 0x8002 +#define MASK_C_MV 0xf003 +#define MATCH_C_ADD 0x9002 +#define MASK_C_ADD 0xf003 +#define MATCH_C_FSDSP 0xa002 +#define MASK_C_FSDSP 0xe003 +#define MATCH_C_SWSP 0xc002 +#define MASK_C_SWSP 0xe003 +#define MATCH_C_FSWSP 0xe002 +#define MASK_C_FSWSP 0xe003 +#define MATCH_C_SRLI_RV32 0x8001 +#define MASK_C_SRLI_RV32 0xfc03 +#define MATCH_C_SRAI_RV32 0x8401 +#define MASK_C_SRAI_RV32 0xfc03 +#define MATCH_C_SLLI_RV32 0x2 +#define MASK_C_SLLI_RV32 0xf003 +#define MATCH_C_LD 0x6000 +#define MASK_C_LD 0xe003 +#define MATCH_C_SD 0xe000 +#define MASK_C_SD 0xe003 +#define MATCH_C_SUBW 0x9c01 +#define MASK_C_SUBW 0xfc63 +#define MATCH_C_ADDW 0x9c21 +#define MASK_C_ADDW 0xfc63 +#define MATCH_C_ADDIW 0x2001 +#define MASK_C_ADDIW 0xe003 +#define MATCH_C_LDSP 0x6002 +#define MASK_C_LDSP 0xe003 +#define MATCH_C_SDSP 0xe002 +#define MASK_C_SDSP 0xe003 +#define MATCH_CUSTOM0 0xb +#define MASK_CUSTOM0 0x707f +#define MATCH_CUSTOM0_RS1 0x200b +#define MASK_CUSTOM0_RS1 0x707f +#define MATCH_CUSTOM0_RS1_RS2 0x300b +#define MASK_CUSTOM0_RS1_RS2 0x707f +#define MATCH_CUSTOM0_RD 0x400b +#define MASK_CUSTOM0_RD 0x707f +#define MATCH_CUSTOM0_RD_RS1 0x600b +#define MASK_CUSTOM0_RD_RS1 0x707f +#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b +#define MASK_CUSTOM0_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM1 0x2b +#define MASK_CUSTOM1 0x707f +#define MATCH_CUSTOM1_RS1 0x202b +#define MASK_CUSTOM1_RS1 0x707f +#define MATCH_CUSTOM1_RS1_RS2 0x302b +#define MASK_CUSTOM1_RS1_RS2 0x707f +#define MATCH_CUSTOM1_RD 0x402b +#define MASK_CUSTOM1_RD 0x707f +#define MATCH_CUSTOM1_RD_RS1 0x602b +#define MASK_CUSTOM1_RD_RS1 0x707f +#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b +#define MASK_CUSTOM1_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM2 0x5b +#define MASK_CUSTOM2 0x707f +#define MATCH_CUSTOM2_RS1 0x205b +#define MASK_CUSTOM2_RS1 0x707f +#define MATCH_CUSTOM2_RS1_RS2 0x305b +#define MASK_CUSTOM2_RS1_RS2 0x707f +#define MATCH_CUSTOM2_RD 0x405b +#define MASK_CUSTOM2_RD 0x707f +#define MATCH_CUSTOM2_RD_RS1 0x605b +#define MASK_CUSTOM2_RD_RS1 0x707f +#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b +#define MASK_CUSTOM2_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM3 0x7b +#define MASK_CUSTOM3 0x707f +#define MATCH_CUSTOM3_RS1 0x207b +#define MASK_CUSTOM3_RS1 0x707f +#define MATCH_CUSTOM3_RS1_RS2 0x307b +#define MASK_CUSTOM3_RS1_RS2 0x707f +#define MATCH_CUSTOM3_RD 0x407b +#define MASK_CUSTOM3_RD 0x707f +#define MATCH_CUSTOM3_RD_RS1 0x607b +#define MASK_CUSTOM3_RD_RS1 0x707f +#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b +#define MASK_CUSTOM3_RD_RS1_RS2 0x707f +#define MATCH_VSETIVLI 0xc0007057 +#define MASK_VSETIVLI 0xc000707f +#define MATCH_VSETVLI 0x7057 +#define MASK_VSETVLI 0x8000707f +#define MATCH_VSETVL 0x80007057 +#define MASK_VSETVL 0xfe00707f +#define MATCH_VLM_V 0x2b00007 +#define MASK_VLM_V 0xfff0707f +#define MATCH_VSM_V 0x2b00027 +#define MASK_VSM_V 0xfff0707f +#define MATCH_VLE8_V 0x7 +#define MASK_VLE8_V 0x1df0707f +#define 
MATCH_VLE16_V 0x5007 +#define MASK_VLE16_V 0x1df0707f +#define MATCH_VLE32_V 0x6007 +#define MASK_VLE32_V 0x1df0707f +#define MATCH_VLE64_V 0x7007 +#define MASK_VLE64_V 0x1df0707f +#define MATCH_VLE128_V 0x10000007 +#define MASK_VLE128_V 0x1df0707f +#define MATCH_VLE256_V 0x10005007 +#define MASK_VLE256_V 0x1df0707f +#define MATCH_VLE512_V 0x10006007 +#define MASK_VLE512_V 0x1df0707f +#define MATCH_VLE1024_V 0x10007007 +#define MASK_VLE1024_V 0x1df0707f +#define MATCH_VSE8_V 0x27 +#define MASK_VSE8_V 0x1df0707f +#define MATCH_VSE16_V 0x5027 +#define MASK_VSE16_V 0x1df0707f +#define MATCH_VSE32_V 0x6027 +#define MASK_VSE32_V 0x1df0707f +#define MATCH_VSE64_V 0x7027 +#define MASK_VSE64_V 0x1df0707f +#define MATCH_VSE128_V 0x10000027 +#define MASK_VSE128_V 0x1df0707f +#define MATCH_VSE256_V 0x10005027 +#define MASK_VSE256_V 0x1df0707f +#define MATCH_VSE512_V 0x10006027 +#define MASK_VSE512_V 0x1df0707f +#define MATCH_VSE1024_V 0x10007027 +#define MASK_VSE1024_V 0x1df0707f +#define MATCH_VLUXEI8_V 0x4000007 +#define MASK_VLUXEI8_V 0x1c00707f +#define MATCH_VLUXEI16_V 0x4005007 +#define MASK_VLUXEI16_V 0x1c00707f +#define MATCH_VLUXEI32_V 0x4006007 +#define MASK_VLUXEI32_V 0x1c00707f +#define MATCH_VLUXEI64_V 0x4007007 +#define MASK_VLUXEI64_V 0x1c00707f +#define MATCH_VLUXEI128_V 0x14000007 +#define MASK_VLUXEI128_V 0x1c00707f +#define MATCH_VLUXEI256_V 0x14005007 +#define MASK_VLUXEI256_V 0x1c00707f +#define MATCH_VLUXEI512_V 0x14006007 +#define MASK_VLUXEI512_V 0x1c00707f +#define MATCH_VLUXEI1024_V 0x14007007 +#define MASK_VLUXEI1024_V 0x1c00707f +#define MATCH_VSUXEI8_V 0x4000027 +#define MASK_VSUXEI8_V 0x1c00707f +#define MATCH_VSUXEI16_V 0x4005027 +#define MASK_VSUXEI16_V 0x1c00707f +#define MATCH_VSUXEI32_V 0x4006027 +#define MASK_VSUXEI32_V 0x1c00707f +#define MATCH_VSUXEI64_V 0x4007027 +#define MASK_VSUXEI64_V 0x1c00707f +#define MATCH_VSUXEI128_V 0x14000027 +#define MASK_VSUXEI128_V 0x1c00707f +#define MATCH_VSUXEI256_V 0x14005027 +#define MASK_VSUXEI256_V 0x1c00707f +#define MATCH_VSUXEI512_V 0x14006027 +#define MASK_VSUXEI512_V 0x1c00707f +#define MATCH_VSUXEI1024_V 0x14007027 +#define MASK_VSUXEI1024_V 0x1c00707f +#define MATCH_VLSE8_V 0x8000007 +#define MASK_VLSE8_V 0x1c00707f +#define MATCH_VLSE16_V 0x8005007 +#define MASK_VLSE16_V 0x1c00707f +#define MATCH_VLSE32_V 0x8006007 +#define MASK_VLSE32_V 0x1c00707f +#define MATCH_VLSE64_V 0x8007007 +#define MASK_VLSE64_V 0x1c00707f +#define MATCH_VLSE128_V 0x18000007 +#define MASK_VLSE128_V 0x1c00707f +#define MATCH_VLSE256_V 0x18005007 +#define MASK_VLSE256_V 0x1c00707f +#define MATCH_VLSE512_V 0x18006007 +#define MASK_VLSE512_V 0x1c00707f +#define MATCH_VLSE1024_V 0x18007007 +#define MASK_VLSE1024_V 0x1c00707f +#define MATCH_VSSE8_V 0x8000027 +#define MASK_VSSE8_V 0x1c00707f +#define MATCH_VSSE16_V 0x8005027 +#define MASK_VSSE16_V 0x1c00707f +#define MATCH_VSSE32_V 0x8006027 +#define MASK_VSSE32_V 0x1c00707f +#define MATCH_VSSE64_V 0x8007027 +#define MASK_VSSE64_V 0x1c00707f +#define MATCH_VSSE128_V 0x18000027 +#define MASK_VSSE128_V 0x1c00707f +#define MATCH_VSSE256_V 0x18005027 +#define MASK_VSSE256_V 0x1c00707f +#define MATCH_VSSE512_V 0x18006027 +#define MASK_VSSE512_V 0x1c00707f +#define MATCH_VSSE1024_V 0x18007027 +#define MASK_VSSE1024_V 0x1c00707f +#define MATCH_VLOXEI8_V 0xc000007 +#define MASK_VLOXEI8_V 0x1c00707f +#define MATCH_VLOXEI16_V 0xc005007 +#define MASK_VLOXEI16_V 0x1c00707f +#define MATCH_VLOXEI32_V 0xc006007 +#define MASK_VLOXEI32_V 0x1c00707f +#define MATCH_VLOXEI64_V 0xc007007 +#define MASK_VLOXEI64_V 
0x1c00707f +#define MATCH_VLOXEI128_V 0x1c000007 +#define MASK_VLOXEI128_V 0x1c00707f +#define MATCH_VLOXEI256_V 0x1c005007 +#define MASK_VLOXEI256_V 0x1c00707f +#define MATCH_VLOXEI512_V 0x1c006007 +#define MASK_VLOXEI512_V 0x1c00707f +#define MATCH_VLOXEI1024_V 0x1c007007 +#define MASK_VLOXEI1024_V 0x1c00707f +#define MATCH_VSOXEI8_V 0xc000027 +#define MASK_VSOXEI8_V 0x1c00707f +#define MATCH_VSOXEI16_V 0xc005027 +#define MASK_VSOXEI16_V 0x1c00707f +#define MATCH_VSOXEI32_V 0xc006027 +#define MASK_VSOXEI32_V 0x1c00707f +#define MATCH_VSOXEI64_V 0xc007027 +#define MASK_VSOXEI64_V 0x1c00707f +#define MATCH_VSOXEI128_V 0x1c000027 +#define MASK_VSOXEI128_V 0x1c00707f +#define MATCH_VSOXEI256_V 0x1c005027 +#define MASK_VSOXEI256_V 0x1c00707f +#define MATCH_VSOXEI512_V 0x1c006027 +#define MASK_VSOXEI512_V 0x1c00707f +#define MATCH_VSOXEI1024_V 0x1c007027 +#define MASK_VSOXEI1024_V 0x1c00707f +#define MATCH_VLE8FF_V 0x1000007 +#define MASK_VLE8FF_V 0x1df0707f +#define MATCH_VLE16FF_V 0x1005007 +#define MASK_VLE16FF_V 0x1df0707f +#define MATCH_VLE32FF_V 0x1006007 +#define MASK_VLE32FF_V 0x1df0707f +#define MATCH_VLE64FF_V 0x1007007 +#define MASK_VLE64FF_V 0x1df0707f +#define MATCH_VLE128FF_V 0x11000007 +#define MASK_VLE128FF_V 0x1df0707f +#define MATCH_VLE256FF_V 0x11005007 +#define MASK_VLE256FF_V 0x1df0707f +#define MATCH_VLE512FF_V 0x11006007 +#define MASK_VLE512FF_V 0x1df0707f +#define MATCH_VLE1024FF_V 0x11007007 +#define MASK_VLE1024FF_V 0x1df0707f +#define MATCH_VL1RE8_V 0x2800007 +#define MASK_VL1RE8_V 0xfff0707f +#define MATCH_VL1RE16_V 0x2805007 +#define MASK_VL1RE16_V 0xfff0707f +#define MATCH_VL1RE32_V 0x2806007 +#define MASK_VL1RE32_V 0xfff0707f +#define MATCH_VL1RE64_V 0x2807007 +#define MASK_VL1RE64_V 0xfff0707f +#define MATCH_VL2RE8_V 0x22800007 +#define MASK_VL2RE8_V 0xfff0707f +#define MATCH_VL2RE16_V 0x22805007 +#define MASK_VL2RE16_V 0xfff0707f +#define MATCH_VL2RE32_V 0x22806007 +#define MASK_VL2RE32_V 0xfff0707f +#define MATCH_VL2RE64_V 0x22807007 +#define MASK_VL2RE64_V 0xfff0707f +#define MATCH_VL4RE8_V 0x62800007 +#define MASK_VL4RE8_V 0xfff0707f +#define MATCH_VL4RE16_V 0x62805007 +#define MASK_VL4RE16_V 0xfff0707f +#define MATCH_VL4RE32_V 0x62806007 +#define MASK_VL4RE32_V 0xfff0707f +#define MATCH_VL4RE64_V 0x62807007 +#define MASK_VL4RE64_V 0xfff0707f +#define MATCH_VL8RE8_V 0xe2800007 +#define MASK_VL8RE8_V 0xfff0707f +#define MATCH_VL8RE16_V 0xe2805007 +#define MASK_VL8RE16_V 0xfff0707f +#define MATCH_VL8RE32_V 0xe2806007 +#define MASK_VL8RE32_V 0xfff0707f +#define MATCH_VL8RE64_V 0xe2807007 +#define MASK_VL8RE64_V 0xfff0707f +#define MATCH_VS1R_V 0x2800027 +#define MASK_VS1R_V 0xfff0707f +#define MATCH_VS2R_V 0x22800027 +#define MASK_VS2R_V 0xfff0707f +#define MATCH_VS4R_V 0x62800027 +#define MASK_VS4R_V 0xfff0707f +#define MATCH_VS8R_V 0xe2800027 +#define MASK_VS8R_V 0xfff0707f +#define MATCH_VFADD_VF 0x5057 +#define MASK_VFADD_VF 0xfc00707f +#define MATCH_VFSUB_VF 0x8005057 +#define MASK_VFSUB_VF 0xfc00707f +#define MATCH_VFMIN_VF 0x10005057 +#define MASK_VFMIN_VF 0xfc00707f +#define MATCH_VFMAX_VF 0x18005057 +#define MASK_VFMAX_VF 0xfc00707f +#define MATCH_VFSGNJ_VF 0x20005057 +#define MASK_VFSGNJ_VF 0xfc00707f +#define MATCH_VFSGNJN_VF 0x24005057 +#define MASK_VFSGNJN_VF 0xfc00707f +#define MATCH_VFSGNJX_VF 0x28005057 +#define MASK_VFSGNJX_VF 0xfc00707f +#define MATCH_VFSLIDE1UP_VF 0x38005057 +#define MASK_VFSLIDE1UP_VF 0xfc00707f +#define MATCH_VFSLIDE1DOWN_VF 0x3c005057 +#define MASK_VFSLIDE1DOWN_VF 0xfc00707f +#define MATCH_VFMV_S_F 0x42005057 +#define 
MASK_VFMV_S_F 0xfff0707f +#define MATCH_VFMERGE_VFM 0x5c005057 +#define MASK_VFMERGE_VFM 0xfe00707f +#define MATCH_VFMV_V_F 0x5e005057 +#define MASK_VFMV_V_F 0xfff0707f +#define MATCH_VMFEQ_VF 0x60005057 +#define MASK_VMFEQ_VF 0xfc00707f +#define MATCH_VMFLE_VF 0x64005057 +#define MASK_VMFLE_VF 0xfc00707f +#define MATCH_VMFLT_VF 0x6c005057 +#define MASK_VMFLT_VF 0xfc00707f +#define MATCH_VMFNE_VF 0x70005057 +#define MASK_VMFNE_VF 0xfc00707f +#define MATCH_VMFGT_VF 0x74005057 +#define MASK_VMFGT_VF 0xfc00707f +#define MATCH_VMFGE_VF 0x7c005057 +#define MASK_VMFGE_VF 0xfc00707f +#define MATCH_VFDIV_VF 0x80005057 +#define MASK_VFDIV_VF 0xfc00707f +#define MATCH_VFRDIV_VF 0x84005057 +#define MASK_VFRDIV_VF 0xfc00707f +#define MATCH_VFMUL_VF 0x90005057 +#define MASK_VFMUL_VF 0xfc00707f +#define MATCH_VFRSUB_VF 0x9c005057 +#define MASK_VFRSUB_VF 0xfc00707f +#define MATCH_VFMADD_VF 0xa0005057 +#define MASK_VFMADD_VF 0xfc00707f +#define MATCH_VFNMADD_VF 0xa4005057 +#define MASK_VFNMADD_VF 0xfc00707f +#define MATCH_VFMSUB_VF 0xa8005057 +#define MASK_VFMSUB_VF 0xfc00707f +#define MATCH_VFNMSUB_VF 0xac005057 +#define MASK_VFNMSUB_VF 0xfc00707f +#define MATCH_VFMACC_VF 0xb0005057 +#define MASK_VFMACC_VF 0xfc00707f +#define MATCH_VFNMACC_VF 0xb4005057 +#define MASK_VFNMACC_VF 0xfc00707f +#define MATCH_VFMSAC_VF 0xb8005057 +#define MASK_VFMSAC_VF 0xfc00707f +#define MATCH_VFNMSAC_VF 0xbc005057 +#define MASK_VFNMSAC_VF 0xfc00707f +#define MATCH_VFWADD_VF 0xc0005057 +#define MASK_VFWADD_VF 0xfc00707f +#define MATCH_VFWSUB_VF 0xc8005057 +#define MASK_VFWSUB_VF 0xfc00707f +#define MATCH_VFWADD_WF 0xd0005057 +#define MASK_VFWADD_WF 0xfc00707f +#define MATCH_VFWSUB_WF 0xd8005057 +#define MASK_VFWSUB_WF 0xfc00707f +#define MATCH_VFWMUL_VF 0xe0005057 +#define MASK_VFWMUL_VF 0xfc00707f +#define MATCH_VFWMACC_VF 0xf0005057 +#define MASK_VFWMACC_VF 0xfc00707f +#define MATCH_VFWNMACC_VF 0xf4005057 +#define MASK_VFWNMACC_VF 0xfc00707f +#define MATCH_VFWMSAC_VF 0xf8005057 +#define MASK_VFWMSAC_VF 0xfc00707f +#define MATCH_VFWNMSAC_VF 0xfc005057 +#define MASK_VFWNMSAC_VF 0xfc00707f +#define MATCH_VFADD_VV 0x1057 +#define MASK_VFADD_VV 0xfc00707f +#define MATCH_VFREDUSUM_VS 0x4001057 +#define MASK_VFREDUSUM_VS 0xfc00707f +#define MATCH_VFSUB_VV 0x8001057 +#define MASK_VFSUB_VV 0xfc00707f +#define MATCH_VFREDOSUM_VS 0xc001057 +#define MASK_VFREDOSUM_VS 0xfc00707f +#define MATCH_VFMIN_VV 0x10001057 +#define MASK_VFMIN_VV 0xfc00707f +#define MATCH_VFREDMIN_VS 0x14001057 +#define MASK_VFREDMIN_VS 0xfc00707f +#define MATCH_VFMAX_VV 0x18001057 +#define MASK_VFMAX_VV 0xfc00707f +#define MATCH_VFREDMAX_VS 0x1c001057 +#define MASK_VFREDMAX_VS 0xfc00707f +#define MATCH_VFSGNJ_VV 0x20001057 +#define MASK_VFSGNJ_VV 0xfc00707f +#define MATCH_VFSGNJN_VV 0x24001057 +#define MASK_VFSGNJN_VV 0xfc00707f +#define MATCH_VFSGNJX_VV 0x28001057 +#define MASK_VFSGNJX_VV 0xfc00707f +#define MATCH_VFMV_F_S 0x42001057 +#define MASK_VFMV_F_S 0xfe0ff07f +#define MATCH_VMFEQ_VV 0x60001057 +#define MASK_VMFEQ_VV 0xfc00707f +#define MATCH_VMFLE_VV 0x64001057 +#define MASK_VMFLE_VV 0xfc00707f +#define MATCH_VMFLT_VV 0x6c001057 +#define MASK_VMFLT_VV 0xfc00707f +#define MATCH_VMFNE_VV 0x70001057 +#define MASK_VMFNE_VV 0xfc00707f +#define MATCH_VFDIV_VV 0x80001057 +#define MASK_VFDIV_VV 0xfc00707f +#define MATCH_VFMUL_VV 0x90001057 +#define MASK_VFMUL_VV 0xfc00707f +#define MATCH_VFMADD_VV 0xa0001057 +#define MASK_VFMADD_VV 0xfc00707f +#define MATCH_VFNMADD_VV 0xa4001057 +#define MASK_VFNMADD_VV 0xfc00707f +#define MATCH_VFMSUB_VV 0xa8001057 +#define 
MASK_VFMSUB_VV 0xfc00707f +#define MATCH_VFNMSUB_VV 0xac001057 +#define MASK_VFNMSUB_VV 0xfc00707f +#define MATCH_VFMACC_VV 0xb0001057 +#define MASK_VFMACC_VV 0xfc00707f +#define MATCH_VFNMACC_VV 0xb4001057 +#define MASK_VFNMACC_VV 0xfc00707f +#define MATCH_VFMSAC_VV 0xb8001057 +#define MASK_VFMSAC_VV 0xfc00707f +#define MATCH_VFNMSAC_VV 0xbc001057 +#define MASK_VFNMSAC_VV 0xfc00707f +#define MATCH_VFCVT_XU_F_V 0x48001057 +#define MASK_VFCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFCVT_X_F_V 0x48009057 +#define MASK_VFCVT_X_F_V 0xfc0ff07f +#define MATCH_VFCVT_F_XU_V 0x48011057 +#define MASK_VFCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFCVT_F_X_V 0x48019057 +#define MASK_VFCVT_F_X_V 0xfc0ff07f +#define MATCH_VFCVT_RTZ_XU_F_V 0x48031057 +#define MASK_VFCVT_RTZ_XU_F_V 0xfc0ff07f +#define MATCH_VFCVT_RTZ_X_F_V 0x48039057 +#define MASK_VFCVT_RTZ_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_XU_F_V 0x48041057 +#define MASK_VFWCVT_XU_F_V 0xfc0ff07f +#define MATCH_VFWCVT_X_F_V 0x48049057 +#define MASK_VFWCVT_X_F_V 0xfc0ff07f +#define MATCH_VFWCVT_F_XU_V 0x48051057 +#define MASK_VFWCVT_F_XU_V 0xfc0ff07f +#define MATCH_VFWCVT_F_X_V 0x48059057 +#define MASK_VFWCVT_F_X_V 0xfc0ff07f +#define MATCH_VFWCVT_F_F_V 0x48061057 +#define MASK_VFWCVT_F_F_V 0xfc0ff07f +#define MATCH_VFWCVT_RTZ_XU_F_V 0x48071057 +#define MASK_VFWCVT_RTZ_XU_F_V 0xfc0ff07f +#define MATCH_VFWCVT_RTZ_X_F_V 0x48079057 +#define MASK_VFWCVT_RTZ_X_F_V 0xfc0ff07f +#define MATCH_VFNCVT_XU_F_W 0x48081057 +#define MASK_VFNCVT_XU_F_W 0xfc0ff07f +#define MATCH_VFNCVT_X_F_W 0x48089057 +#define MASK_VFNCVT_X_F_W 0xfc0ff07f +#define MATCH_VFNCVT_F_XU_W 0x48091057 +#define MASK_VFNCVT_F_XU_W 0xfc0ff07f +#define MATCH_VFNCVT_F_X_W 0x48099057 +#define MASK_VFNCVT_F_X_W 0xfc0ff07f +#define MATCH_VFNCVT_F_F_W 0x480a1057 +#define MASK_VFNCVT_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_ROD_F_F_W 0x480a9057 +#define MASK_VFNCVT_ROD_F_F_W 0xfc0ff07f +#define MATCH_VFNCVT_RTZ_XU_F_W 0x480b1057 +#define MASK_VFNCVT_RTZ_XU_F_W 0xfc0ff07f +#define MATCH_VFNCVT_RTZ_X_F_W 0x480b9057 +#define MASK_VFNCVT_RTZ_X_F_W 0xfc0ff07f +#define MATCH_VFSQRT_V 0x4c001057 +#define MASK_VFSQRT_V 0xfc0ff07f +#define MATCH_VFRSQRT7_V 0x4c021057 +#define MASK_VFRSQRT7_V 0xfc0ff07f +#define MATCH_VFREC7_V 0x4c029057 +#define MASK_VFREC7_V 0xfc0ff07f +#define MATCH_VFCLASS_V 0x4c081057 +#define MASK_VFCLASS_V 0xfc0ff07f +#define MATCH_VFWADD_VV 0xc0001057 +#define MASK_VFWADD_VV 0xfc00707f +#define MATCH_VFWREDUSUM_VS 0xc4001057 +#define MASK_VFWREDUSUM_VS 0xfc00707f +#define MATCH_VFWSUB_VV 0xc8001057 +#define MASK_VFWSUB_VV 0xfc00707f +#define MATCH_VFWREDOSUM_VS 0xcc001057 +#define MASK_VFWREDOSUM_VS 0xfc00707f +#define MATCH_VFWADD_WV 0xd0001057 +#define MASK_VFWADD_WV 0xfc00707f +#define MATCH_VFWSUB_WV 0xd8001057 +#define MASK_VFWSUB_WV 0xfc00707f +#define MATCH_VFWMUL_VV 0xe0001057 +#define MASK_VFWMUL_VV 0xfc00707f +#define MATCH_VFWMACC_VV 0xf0001057 +#define MASK_VFWMACC_VV 0xfc00707f +#define MATCH_VFWNMACC_VV 0xf4001057 +#define MASK_VFWNMACC_VV 0xfc00707f +#define MATCH_VFWMSAC_VV 0xf8001057 +#define MASK_VFWMSAC_VV 0xfc00707f +#define MATCH_VFWNMSAC_VV 0xfc001057 +#define MASK_VFWNMSAC_VV 0xfc00707f +#define MATCH_VADD_VX 0x4057 +#define MASK_VADD_VX 0xfc00707f +#define MATCH_VSUB_VX 0x8004057 +#define MASK_VSUB_VX 0xfc00707f +#define MATCH_VRSUB_VX 0xc004057 +#define MASK_VRSUB_VX 0xfc00707f +#define MATCH_VMINU_VX 0x10004057 +#define MASK_VMINU_VX 0xfc00707f +#define MATCH_VMIN_VX 0x14004057 +#define MASK_VMIN_VX 0xfc00707f +#define MATCH_VMAXU_VX 0x18004057 +#define MASK_VMAXU_VX 
0xfc00707f +#define MATCH_VMAX_VX 0x1c004057 +#define MASK_VMAX_VX 0xfc00707f +#define MATCH_VAND_VX 0x24004057 +#define MASK_VAND_VX 0xfc00707f +#define MATCH_VOR_VX 0x28004057 +#define MASK_VOR_VX 0xfc00707f +#define MATCH_VXOR_VX 0x2c004057 +#define MASK_VXOR_VX 0xfc00707f +#define MATCH_VRGATHER_VX 0x30004057 +#define MASK_VRGATHER_VX 0xfc00707f +#define MATCH_VSLIDEUP_VX 0x38004057 +#define MASK_VSLIDEUP_VX 0xfc00707f +#define MATCH_VSLIDEDOWN_VX 0x3c004057 +#define MASK_VSLIDEDOWN_VX 0xfc00707f +#define MATCH_VADC_VXM 0x40004057 +#define MASK_VADC_VXM 0xfe00707f +#define MATCH_VMADC_VXM 0x44004057 +#define MASK_VMADC_VXM 0xfe00707f +#define MATCH_VMADC_VX 0x46004057 +#define MASK_VMADC_VX 0xfe00707f +#define MATCH_VSBC_VXM 0x48004057 +#define MASK_VSBC_VXM 0xfe00707f +#define MATCH_VMSBC_VXM 0x4c004057 +#define MASK_VMSBC_VXM 0xfe00707f +#define MATCH_VMSBC_VX 0x4e004057 +#define MASK_VMSBC_VX 0xfe00707f +#define MATCH_VMERGE_VXM 0x5c004057 +#define MASK_VMERGE_VXM 0xfe00707f +#define MATCH_VMV_V_X 0x5e004057 +#define MASK_VMV_V_X 0xfff0707f +#define MATCH_VMSEQ_VX 0x60004057 +#define MASK_VMSEQ_VX 0xfc00707f +#define MATCH_VMSNE_VX 0x64004057 +#define MASK_VMSNE_VX 0xfc00707f +#define MATCH_VMSLTU_VX 0x68004057 +#define MASK_VMSLTU_VX 0xfc00707f +#define MATCH_VMSLT_VX 0x6c004057 +#define MASK_VMSLT_VX 0xfc00707f +#define MATCH_VMSLEU_VX 0x70004057 +#define MASK_VMSLEU_VX 0xfc00707f +#define MATCH_VMSLE_VX 0x74004057 +#define MASK_VMSLE_VX 0xfc00707f +#define MATCH_VMSGTU_VX 0x78004057 +#define MASK_VMSGTU_VX 0xfc00707f +#define MATCH_VMSGT_VX 0x7c004057 +#define MASK_VMSGT_VX 0xfc00707f +#define MATCH_VSADDU_VX 0x80004057 +#define MASK_VSADDU_VX 0xfc00707f +#define MATCH_VSADD_VX 0x84004057 +#define MASK_VSADD_VX 0xfc00707f +#define MATCH_VSSUBU_VX 0x88004057 +#define MASK_VSSUBU_VX 0xfc00707f +#define MATCH_VSSUB_VX 0x8c004057 +#define MASK_VSSUB_VX 0xfc00707f +#define MATCH_VSLL_VX 0x94004057 +#define MASK_VSLL_VX 0xfc00707f +#define MATCH_VSMUL_VX 0x9c004057 +#define MASK_VSMUL_VX 0xfc00707f +#define MATCH_VSRL_VX 0xa0004057 +#define MASK_VSRL_VX 0xfc00707f +#define MATCH_VSRA_VX 0xa4004057 +#define MASK_VSRA_VX 0xfc00707f +#define MATCH_VSSRL_VX 0xa8004057 +#define MASK_VSSRL_VX 0xfc00707f +#define MATCH_VSSRA_VX 0xac004057 +#define MASK_VSSRA_VX 0xfc00707f +#define MATCH_VNSRL_WX 0xb0004057 +#define MASK_VNSRL_WX 0xfc00707f +#define MATCH_VNSRA_WX 0xb4004057 +#define MASK_VNSRA_WX 0xfc00707f +#define MATCH_VNCLIPU_WX 0xb8004057 +#define MASK_VNCLIPU_WX 0xfc00707f +#define MATCH_VNCLIP_WX 0xbc004057 +#define MASK_VNCLIP_WX 0xfc00707f +#define MATCH_VADD_VV 0x57 +#define MASK_VADD_VV 0xfc00707f +#define MATCH_VSUB_VV 0x8000057 +#define MASK_VSUB_VV 0xfc00707f +#define MATCH_VMINU_VV 0x10000057 +#define MASK_VMINU_VV 0xfc00707f +#define MATCH_VMIN_VV 0x14000057 +#define MASK_VMIN_VV 0xfc00707f +#define MATCH_VMAXU_VV 0x18000057 +#define MASK_VMAXU_VV 0xfc00707f +#define MATCH_VMAX_VV 0x1c000057 +#define MASK_VMAX_VV 0xfc00707f +#define MATCH_VAND_VV 0x24000057 +#define MASK_VAND_VV 0xfc00707f +#define MATCH_VOR_VV 0x28000057 +#define MASK_VOR_VV 0xfc00707f +#define MATCH_VXOR_VV 0x2c000057 +#define MASK_VXOR_VV 0xfc00707f +#define MATCH_VRGATHER_VV 0x30000057 +#define MASK_VRGATHER_VV 0xfc00707f +#define MATCH_VRGATHEREI16_VV 0x38000057 +#define MASK_VRGATHEREI16_VV 0xfc00707f +#define MATCH_VADC_VVM 0x40000057 +#define MASK_VADC_VVM 0xfe00707f +#define MATCH_VMADC_VVM 0x44000057 +#define MASK_VMADC_VVM 0xfe00707f +#define MATCH_VMADC_VV 0x46000057 +#define MASK_VMADC_VV 
0xfe00707f +#define MATCH_VSBC_VVM 0x48000057 +#define MASK_VSBC_VVM 0xfe00707f +#define MATCH_VMSBC_VVM 0x4c000057 +#define MASK_VMSBC_VVM 0xfe00707f +#define MATCH_VMSBC_VV 0x4e000057 +#define MASK_VMSBC_VV 0xfe00707f +#define MATCH_VMERGE_VVM 0x5c000057 +#define MASK_VMERGE_VVM 0xfe00707f +#define MATCH_VMV_V_V 0x5e000057 +#define MASK_VMV_V_V 0xfff0707f +#define MATCH_VMSEQ_VV 0x60000057 +#define MASK_VMSEQ_VV 0xfc00707f +#define MATCH_VMSNE_VV 0x64000057 +#define MASK_VMSNE_VV 0xfc00707f +#define MATCH_VMSLTU_VV 0x68000057 +#define MASK_VMSLTU_VV 0xfc00707f +#define MATCH_VMSLT_VV 0x6c000057 +#define MASK_VMSLT_VV 0xfc00707f +#define MATCH_VMSLEU_VV 0x70000057 +#define MASK_VMSLEU_VV 0xfc00707f +#define MATCH_VMSLE_VV 0x74000057 +#define MASK_VMSLE_VV 0xfc00707f +#define MATCH_VSADDU_VV 0x80000057 +#define MASK_VSADDU_VV 0xfc00707f +#define MATCH_VSADD_VV 0x84000057 +#define MASK_VSADD_VV 0xfc00707f +#define MATCH_VSSUBU_VV 0x88000057 +#define MASK_VSSUBU_VV 0xfc00707f +#define MATCH_VSSUB_VV 0x8c000057 +#define MASK_VSSUB_VV 0xfc00707f +#define MATCH_VSLL_VV 0x94000057 +#define MASK_VSLL_VV 0xfc00707f +#define MATCH_VSMUL_VV 0x9c000057 +#define MASK_VSMUL_VV 0xfc00707f +#define MATCH_VSRL_VV 0xa0000057 +#define MASK_VSRL_VV 0xfc00707f +#define MATCH_VSRA_VV 0xa4000057 +#define MASK_VSRA_VV 0xfc00707f +#define MATCH_VSSRL_VV 0xa8000057 +#define MASK_VSSRL_VV 0xfc00707f +#define MATCH_VSSRA_VV 0xac000057 +#define MASK_VSSRA_VV 0xfc00707f +#define MATCH_VNSRL_WV 0xb0000057 +#define MASK_VNSRL_WV 0xfc00707f +#define MATCH_VNSRA_WV 0xb4000057 +#define MASK_VNSRA_WV 0xfc00707f +#define MATCH_VNCLIPU_WV 0xb8000057 +#define MASK_VNCLIPU_WV 0xfc00707f +#define MATCH_VNCLIP_WV 0xbc000057 +#define MASK_VNCLIP_WV 0xfc00707f +#define MATCH_VWREDSUMU_VS 0xc0000057 +#define MASK_VWREDSUMU_VS 0xfc00707f +#define MATCH_VWREDSUM_VS 0xc4000057 +#define MASK_VWREDSUM_VS 0xfc00707f +#define MATCH_VADD_VI 0x3057 +#define MASK_VADD_VI 0xfc00707f +#define MATCH_VRSUB_VI 0xc003057 +#define MASK_VRSUB_VI 0xfc00707f +#define MATCH_VAND_VI 0x24003057 +#define MASK_VAND_VI 0xfc00707f +#define MATCH_VOR_VI 0x28003057 +#define MASK_VOR_VI 0xfc00707f +#define MATCH_VXOR_VI 0x2c003057 +#define MASK_VXOR_VI 0xfc00707f +#define MATCH_VRGATHER_VI 0x30003057 +#define MASK_VRGATHER_VI 0xfc00707f +#define MATCH_VSLIDEUP_VI 0x38003057 +#define MASK_VSLIDEUP_VI 0xfc00707f +#define MATCH_VSLIDEDOWN_VI 0x3c003057 +#define MASK_VSLIDEDOWN_VI 0xfc00707f +#define MATCH_VADC_VIM 0x40003057 +#define MASK_VADC_VIM 0xfe00707f +#define MATCH_VMADC_VIM 0x44003057 +#define MASK_VMADC_VIM 0xfe00707f +#define MATCH_VMADC_VI 0x46003057 +#define MASK_VMADC_VI 0xfe00707f +#define MATCH_VMERGE_VIM 0x5c003057 +#define MASK_VMERGE_VIM 0xfe00707f +#define MATCH_VMV_V_I 0x5e003057 +#define MASK_VMV_V_I 0xfff0707f +#define MATCH_VMSEQ_VI 0x60003057 +#define MASK_VMSEQ_VI 0xfc00707f +#define MATCH_VMSNE_VI 0x64003057 +#define MASK_VMSNE_VI 0xfc00707f +#define MATCH_VMSLEU_VI 0x70003057 +#define MASK_VMSLEU_VI 0xfc00707f +#define MATCH_VMSLE_VI 0x74003057 +#define MASK_VMSLE_VI 0xfc00707f +#define MATCH_VMSGTU_VI 0x78003057 +#define MASK_VMSGTU_VI 0xfc00707f +#define MATCH_VMSGT_VI 0x7c003057 +#define MASK_VMSGT_VI 0xfc00707f +#define MATCH_VSADDU_VI 0x80003057 +#define MASK_VSADDU_VI 0xfc00707f +#define MATCH_VSADD_VI 0x84003057 +#define MASK_VSADD_VI 0xfc00707f +#define MATCH_VSLL_VI 0x94003057 +#define MASK_VSLL_VI 0xfc00707f +#define MATCH_VMV1R_V 0x9e003057 +#define MASK_VMV1R_V 0xfe0ff07f +#define MATCH_VMV2R_V 0x9e00b057 +#define 
MASK_VMV2R_V 0xfe0ff07f +#define MATCH_VMV4R_V 0x9e01b057 +#define MASK_VMV4R_V 0xfe0ff07f +#define MATCH_VMV8R_V 0x9e03b057 +#define MASK_VMV8R_V 0xfe0ff07f +#define MATCH_VSRL_VI 0xa0003057 +#define MASK_VSRL_VI 0xfc00707f +#define MATCH_VSRA_VI 0xa4003057 +#define MASK_VSRA_VI 0xfc00707f +#define MATCH_VSSRL_VI 0xa8003057 +#define MASK_VSSRL_VI 0xfc00707f +#define MATCH_VSSRA_VI 0xac003057 +#define MASK_VSSRA_VI 0xfc00707f +#define MATCH_VNSRL_WI 0xb0003057 +#define MASK_VNSRL_WI 0xfc00707f +#define MATCH_VNSRA_WI 0xb4003057 +#define MASK_VNSRA_WI 0xfc00707f +#define MATCH_VNCLIPU_WI 0xb8003057 +#define MASK_VNCLIPU_WI 0xfc00707f +#define MATCH_VNCLIP_WI 0xbc003057 +#define MASK_VNCLIP_WI 0xfc00707f +#define MATCH_VREDSUM_VS 0x2057 +#define MASK_VREDSUM_VS 0xfc00707f +#define MATCH_VREDAND_VS 0x4002057 +#define MASK_VREDAND_VS 0xfc00707f +#define MATCH_VREDOR_VS 0x8002057 +#define MASK_VREDOR_VS 0xfc00707f +#define MATCH_VREDXOR_VS 0xc002057 +#define MASK_VREDXOR_VS 0xfc00707f +#define MATCH_VREDMINU_VS 0x10002057 +#define MASK_VREDMINU_VS 0xfc00707f +#define MATCH_VREDMIN_VS 0x14002057 +#define MASK_VREDMIN_VS 0xfc00707f +#define MATCH_VREDMAXU_VS 0x18002057 +#define MASK_VREDMAXU_VS 0xfc00707f +#define MATCH_VREDMAX_VS 0x1c002057 +#define MASK_VREDMAX_VS 0xfc00707f +#define MATCH_VAADDU_VV 0x20002057 +#define MASK_VAADDU_VV 0xfc00707f +#define MATCH_VAADD_VV 0x24002057 +#define MASK_VAADD_VV 0xfc00707f +#define MATCH_VASUBU_VV 0x28002057 +#define MASK_VASUBU_VV 0xfc00707f +#define MATCH_VASUB_VV 0x2c002057 +#define MASK_VASUB_VV 0xfc00707f +#define MATCH_VMV_X_S 0x42002057 +#define MASK_VMV_X_S 0xfe0ff07f +#define MATCH_VZEXT_VF8 0x48012057 +#define MASK_VZEXT_VF8 0xfc0ff07f +#define MATCH_VSEXT_VF8 0x4801a057 +#define MASK_VSEXT_VF8 0xfc0ff07f +#define MATCH_VZEXT_VF4 0x48022057 +#define MASK_VZEXT_VF4 0xfc0ff07f +#define MATCH_VSEXT_VF4 0x4802a057 +#define MASK_VSEXT_VF4 0xfc0ff07f +#define MATCH_VZEXT_VF2 0x48032057 +#define MASK_VZEXT_VF2 0xfc0ff07f +#define MATCH_VSEXT_VF2 0x4803a057 +#define MASK_VSEXT_VF2 0xfc0ff07f +#define MATCH_VCOMPRESS_VM 0x5e002057 +#define MASK_VCOMPRESS_VM 0xfe00707f +#define MATCH_VMANDN_MM 0x60002057 +#define MASK_VMANDN_MM 0xfc00707f +#define MATCH_VMAND_MM 0x64002057 +#define MASK_VMAND_MM 0xfc00707f +#define MATCH_VMOR_MM 0x68002057 +#define MASK_VMOR_MM 0xfc00707f +#define MATCH_VMXOR_MM 0x6c002057 +#define MASK_VMXOR_MM 0xfc00707f +#define MATCH_VMORN_MM 0x70002057 +#define MASK_VMORN_MM 0xfc00707f +#define MATCH_VMNAND_MM 0x74002057 +#define MASK_VMNAND_MM 0xfc00707f +#define MATCH_VMNOR_MM 0x78002057 +#define MASK_VMNOR_MM 0xfc00707f +#define MATCH_VMXNOR_MM 0x7c002057 +#define MASK_VMXNOR_MM 0xfc00707f +#define MATCH_VMSBF_M 0x5000a057 +#define MASK_VMSBF_M 0xfc0ff07f +#define MATCH_VMSOF_M 0x50012057 +#define MASK_VMSOF_M 0xfc0ff07f +#define MATCH_VMSIF_M 0x5001a057 +#define MASK_VMSIF_M 0xfc0ff07f +#define MATCH_VIOTA_M 0x50082057 +#define MASK_VIOTA_M 0xfc0ff07f +#define MATCH_VID_V 0x5008a057 +#define MASK_VID_V 0xfdfff07f +#define MATCH_VCPOP_M 0x40082057 +#define MASK_VCPOP_M 0xfc0ff07f +#define MATCH_VFIRST_M 0x4008a057 +#define MASK_VFIRST_M 0xfc0ff07f +#define MATCH_VDIVU_VV 0x80002057 +#define MASK_VDIVU_VV 0xfc00707f +#define MATCH_VDIV_VV 0x84002057 +#define MASK_VDIV_VV 0xfc00707f +#define MATCH_VREMU_VV 0x88002057 +#define MASK_VREMU_VV 0xfc00707f +#define MATCH_VREM_VV 0x8c002057 +#define MASK_VREM_VV 0xfc00707f +#define MATCH_VMULHU_VV 0x90002057 +#define MASK_VMULHU_VV 0xfc00707f +#define MATCH_VMUL_VV 0x94002057 +#define 
MASK_VMUL_VV 0xfc00707f +#define MATCH_VMULHSU_VV 0x98002057 +#define MASK_VMULHSU_VV 0xfc00707f +#define MATCH_VMULH_VV 0x9c002057 +#define MASK_VMULH_VV 0xfc00707f +#define MATCH_VMADD_VV 0xa4002057 +#define MASK_VMADD_VV 0xfc00707f +#define MATCH_VNMSUB_VV 0xac002057 +#define MASK_VNMSUB_VV 0xfc00707f +#define MATCH_VMACC_VV 0xb4002057 +#define MASK_VMACC_VV 0xfc00707f +#define MATCH_VNMSAC_VV 0xbc002057 +#define MASK_VNMSAC_VV 0xfc00707f +#define MATCH_VWADDU_VV 0xc0002057 +#define MASK_VWADDU_VV 0xfc00707f +#define MATCH_VWADD_VV 0xc4002057 +#define MASK_VWADD_VV 0xfc00707f +#define MATCH_VWSUBU_VV 0xc8002057 +#define MASK_VWSUBU_VV 0xfc00707f +#define MATCH_VWSUB_VV 0xcc002057 +#define MASK_VWSUB_VV 0xfc00707f +#define MATCH_VWADDU_WV 0xd0002057 +#define MASK_VWADDU_WV 0xfc00707f +#define MATCH_VWADD_WV 0xd4002057 +#define MASK_VWADD_WV 0xfc00707f +#define MATCH_VWSUBU_WV 0xd8002057 +#define MASK_VWSUBU_WV 0xfc00707f +#define MATCH_VWSUB_WV 0xdc002057 +#define MASK_VWSUB_WV 0xfc00707f +#define MATCH_VWMULU_VV 0xe0002057 +#define MASK_VWMULU_VV 0xfc00707f +#define MATCH_VWMULSU_VV 0xe8002057 +#define MASK_VWMULSU_VV 0xfc00707f +#define MATCH_VWMUL_VV 0xec002057 +#define MASK_VWMUL_VV 0xfc00707f +#define MATCH_VWMACCU_VV 0xf0002057 +#define MASK_VWMACCU_VV 0xfc00707f +#define MATCH_VWMACC_VV 0xf4002057 +#define MASK_VWMACC_VV 0xfc00707f +#define MATCH_VWMACCSU_VV 0xfc002057 +#define MASK_VWMACCSU_VV 0xfc00707f +#define MATCH_VAADDU_VX 0x20006057 +#define MASK_VAADDU_VX 0xfc00707f +#define MATCH_VAADD_VX 0x24006057 +#define MASK_VAADD_VX 0xfc00707f +#define MATCH_VASUBU_VX 0x28006057 +#define MASK_VASUBU_VX 0xfc00707f +#define MATCH_VASUB_VX 0x2c006057 +#define MASK_VASUB_VX 0xfc00707f +#define MATCH_VMV_S_X 0x42006057 +#define MASK_VMV_S_X 0xfff0707f +#define MATCH_VSLIDE1UP_VX 0x38006057 +#define MASK_VSLIDE1UP_VX 0xfc00707f +#define MATCH_VSLIDE1DOWN_VX 0x3c006057 +#define MASK_VSLIDE1DOWN_VX 0xfc00707f +#define MATCH_VDIVU_VX 0x80006057 +#define MASK_VDIVU_VX 0xfc00707f +#define MATCH_VDIV_VX 0x84006057 +#define MASK_VDIV_VX 0xfc00707f +#define MATCH_VREMU_VX 0x88006057 +#define MASK_VREMU_VX 0xfc00707f +#define MATCH_VREM_VX 0x8c006057 +#define MASK_VREM_VX 0xfc00707f +#define MATCH_VMULHU_VX 0x90006057 +#define MASK_VMULHU_VX 0xfc00707f +#define MATCH_VMUL_VX 0x94006057 +#define MASK_VMUL_VX 0xfc00707f +#define MATCH_VMULHSU_VX 0x98006057 +#define MASK_VMULHSU_VX 0xfc00707f +#define MATCH_VMULH_VX 0x9c006057 +#define MASK_VMULH_VX 0xfc00707f +#define MATCH_VMADD_VX 0xa4006057 +#define MASK_VMADD_VX 0xfc00707f +#define MATCH_VNMSUB_VX 0xac006057 +#define MASK_VNMSUB_VX 0xfc00707f +#define MATCH_VMACC_VX 0xb4006057 +#define MASK_VMACC_VX 0xfc00707f +#define MATCH_VNMSAC_VX 0xbc006057 +#define MASK_VNMSAC_VX 0xfc00707f +#define MATCH_VWADDU_VX 0xc0006057 +#define MASK_VWADDU_VX 0xfc00707f +#define MATCH_VWADD_VX 0xc4006057 +#define MASK_VWADD_VX 0xfc00707f +#define MATCH_VWSUBU_VX 0xc8006057 +#define MASK_VWSUBU_VX 0xfc00707f +#define MATCH_VWSUB_VX 0xcc006057 +#define MASK_VWSUB_VX 0xfc00707f +#define MATCH_VWADDU_WX 0xd0006057 +#define MASK_VWADDU_WX 0xfc00707f +#define MATCH_VWADD_WX 0xd4006057 +#define MASK_VWADD_WX 0xfc00707f +#define MATCH_VWSUBU_WX 0xd8006057 +#define MASK_VWSUBU_WX 0xfc00707f +#define MATCH_VWSUB_WX 0xdc006057 +#define MASK_VWSUB_WX 0xfc00707f +#define MATCH_VWMULU_VX 0xe0006057 +#define MASK_VWMULU_VX 0xfc00707f +#define MATCH_VWMULSU_VX 0xe8006057 +#define MASK_VWMULSU_VX 0xfc00707f +#define MATCH_VWMUL_VX 0xec006057 +#define MASK_VWMUL_VX 0xfc00707f 
+#define MATCH_VWMACCU_VX 0xf0006057 +#define MASK_VWMACCU_VX 0xfc00707f +#define MATCH_VWMACC_VX 0xf4006057 +#define MASK_VWMACC_VX 0xfc00707f +#define MATCH_VWMACCUS_VX 0xf8006057 +#define MASK_VWMACCUS_VX 0xfc00707f +#define MATCH_VWMACCSU_VX 0xfc006057 +#define MASK_VWMACCSU_VX 0xfc00707f +#define MATCH_VAMOSWAPEI8_V 0x800002f +#define MASK_VAMOSWAPEI8_V 0xf800707f +#define MATCH_VAMOADDEI8_V 0x2f +#define MASK_VAMOADDEI8_V 0xf800707f +#define MATCH_VAMOXOREI8_V 0x2000002f +#define MASK_VAMOXOREI8_V 0xf800707f +#define MATCH_VAMOANDEI8_V 0x6000002f +#define MASK_VAMOANDEI8_V 0xf800707f +#define MATCH_VAMOOREI8_V 0x4000002f +#define MASK_VAMOOREI8_V 0xf800707f +#define MATCH_VAMOMINEI8_V 0x8000002f +#define MASK_VAMOMINEI8_V 0xf800707f +#define MATCH_VAMOMAXEI8_V 0xa000002f +#define MASK_VAMOMAXEI8_V 0xf800707f +#define MATCH_VAMOMINUEI8_V 0xc000002f +#define MASK_VAMOMINUEI8_V 0xf800707f +#define MATCH_VAMOMAXUEI8_V 0xe000002f +#define MASK_VAMOMAXUEI8_V 0xf800707f +#define MATCH_VAMOSWAPEI16_V 0x800502f +#define MASK_VAMOSWAPEI16_V 0xf800707f +#define MATCH_VAMOADDEI16_V 0x502f +#define MASK_VAMOADDEI16_V 0xf800707f +#define MATCH_VAMOXOREI16_V 0x2000502f +#define MASK_VAMOXOREI16_V 0xf800707f +#define MATCH_VAMOANDEI16_V 0x6000502f +#define MASK_VAMOANDEI16_V 0xf800707f +#define MATCH_VAMOOREI16_V 0x4000502f +#define MASK_VAMOOREI16_V 0xf800707f +#define MATCH_VAMOMINEI16_V 0x8000502f +#define MASK_VAMOMINEI16_V 0xf800707f +#define MATCH_VAMOMAXEI16_V 0xa000502f +#define MASK_VAMOMAXEI16_V 0xf800707f +#define MATCH_VAMOMINUEI16_V 0xc000502f +#define MASK_VAMOMINUEI16_V 0xf800707f +#define MATCH_VAMOMAXUEI16_V 0xe000502f +#define MASK_VAMOMAXUEI16_V 0xf800707f +#define MATCH_VAMOSWAPEI32_V 0x800602f +#define MASK_VAMOSWAPEI32_V 0xf800707f +#define MATCH_VAMOADDEI32_V 0x602f +#define MASK_VAMOADDEI32_V 0xf800707f +#define MATCH_VAMOXOREI32_V 0x2000602f +#define MASK_VAMOXOREI32_V 0xf800707f +#define MATCH_VAMOANDEI32_V 0x6000602f +#define MASK_VAMOANDEI32_V 0xf800707f +#define MATCH_VAMOOREI32_V 0x4000602f +#define MASK_VAMOOREI32_V 0xf800707f +#define MATCH_VAMOMINEI32_V 0x8000602f +#define MASK_VAMOMINEI32_V 0xf800707f +#define MATCH_VAMOMAXEI32_V 0xa000602f +#define MASK_VAMOMAXEI32_V 0xf800707f +#define MATCH_VAMOMINUEI32_V 0xc000602f +#define MASK_VAMOMINUEI32_V 0xf800707f +#define MATCH_VAMOMAXUEI32_V 0xe000602f +#define MASK_VAMOMAXUEI32_V 0xf800707f +#define MATCH_VAMOSWAPEI64_V 0x800702f +#define MASK_VAMOSWAPEI64_V 0xf800707f +#define MATCH_VAMOADDEI64_V 0x702f +#define MASK_VAMOADDEI64_V 0xf800707f +#define MATCH_VAMOXOREI64_V 0x2000702f +#define MASK_VAMOXOREI64_V 0xf800707f +#define MATCH_VAMOANDEI64_V 0x6000702f +#define MASK_VAMOANDEI64_V 0xf800707f +#define MATCH_VAMOOREI64_V 0x4000702f +#define MASK_VAMOOREI64_V 0xf800707f +#define MATCH_VAMOMINEI64_V 0x8000702f +#define MASK_VAMOMINEI64_V 0xf800707f +#define MATCH_VAMOMAXEI64_V 0xa000702f +#define MASK_VAMOMAXEI64_V 0xf800707f +#define MATCH_VAMOMINUEI64_V 0xc000702f +#define MASK_VAMOMINUEI64_V 0xf800707f +#define MATCH_VAMOMAXUEI64_V 0xe000702f +#define MASK_VAMOMAXUEI64_V 0xf800707f +#define MATCH_ADD8 0x48000077 +#define MASK_ADD8 0xfe00707f +#define MATCH_ADD16 0x40000077 +#define MASK_ADD16 0xfe00707f +#define MATCH_ADD64 0xc0001077 +#define MASK_ADD64 0xfe00707f +#define MATCH_AVE 0xe0000077 +#define MASK_AVE 0xfe00707f +#define MATCH_BITREV 0xe6000077 +#define MASK_BITREV 0xfe00707f +#define MATCH_BITREVI 0xe8000077 +#define MASK_BITREVI 0xfc00707f +#define MATCH_BPICK 0x3077 +#define MASK_BPICK 0x600707f 
+#define MATCH_CLRS8 0xae000077 +#define MASK_CLRS8 0xfff0707f +#define MATCH_CLRS16 0xae800077 +#define MASK_CLRS16 0xfff0707f +#define MATCH_CLRS32 0xaf800077 +#define MASK_CLRS32 0xfff0707f +#define MATCH_CLO8 0xae300077 +#define MASK_CLO8 0xfff0707f +#define MATCH_CLO16 0xaeb00077 +#define MASK_CLO16 0xfff0707f +#define MATCH_CLO32 0xafb00077 +#define MASK_CLO32 0xfff0707f +#define MATCH_CLZ8 0xae100077 +#define MASK_CLZ8 0xfff0707f +#define MATCH_CLZ16 0xae900077 +#define MASK_CLZ16 0xfff0707f +#define MATCH_CLZ32 0xaf900077 +#define MASK_CLZ32 0xfff0707f +#define MATCH_CMPEQ8 0x4e000077 +#define MASK_CMPEQ8 0xfe00707f +#define MATCH_CMPEQ16 0x4c000077 +#define MASK_CMPEQ16 0xfe00707f +#define MATCH_CRAS16 0x44000077 +#define MASK_CRAS16 0xfe00707f +#define MATCH_CRSA16 0x46000077 +#define MASK_CRSA16 0xfe00707f +#define MATCH_INSB 0xac000077 +#define MASK_INSB 0xff80707f +#define MATCH_KABS8 0xad000077 +#define MASK_KABS8 0xfff0707f +#define MATCH_KABS16 0xad100077 +#define MASK_KABS16 0xfff0707f +#define MATCH_KABSW 0xad400077 +#define MASK_KABSW 0xfff0707f +#define MATCH_KADD8 0x18000077 +#define MASK_KADD8 0xfe00707f +#define MATCH_KADD16 0x10000077 +#define MASK_KADD16 0xfe00707f +#define MATCH_KADD64 0x90001077 +#define MASK_KADD64 0xfe00707f +#define MATCH_KADDH 0x4001077 +#define MASK_KADDH 0xfe00707f +#define MATCH_KADDW 0x1077 +#define MASK_KADDW 0xfe00707f +#define MATCH_KCRAS16 0x14000077 +#define MASK_KCRAS16 0xfe00707f +#define MATCH_KCRSA16 0x16000077 +#define MASK_KCRSA16 0xfe00707f +#define MATCH_KDMBB 0xa001077 +#define MASK_KDMBB 0xfe00707f +#define MATCH_KDMBT 0x1a001077 +#define MASK_KDMBT 0xfe00707f +#define MATCH_KDMTT 0x2a001077 +#define MASK_KDMTT 0xfe00707f +#define MATCH_KDMABB 0xd2001077 +#define MASK_KDMABB 0xfe00707f +#define MATCH_KDMABT 0xe2001077 +#define MASK_KDMABT 0xfe00707f +#define MATCH_KDMATT 0xf2001077 +#define MASK_KDMATT 0xfe00707f +#define MATCH_KHM8 0x8e000077 +#define MASK_KHM8 0xfe00707f +#define MATCH_KHMX8 0x9e000077 +#define MASK_KHMX8 0xfe00707f +#define MATCH_KHM16 0x86000077 +#define MASK_KHM16 0xfe00707f +#define MATCH_KHMX16 0x96000077 +#define MASK_KHMX16 0xfe00707f +#define MATCH_KHMBB 0xc001077 +#define MASK_KHMBB 0xfe00707f +#define MATCH_KHMBT 0x1c001077 +#define MASK_KHMBT 0xfe00707f +#define MATCH_KHMTT 0x2c001077 +#define MASK_KHMTT 0xfe00707f +#define MATCH_KMABB 0x5a001077 +#define MASK_KMABB 0xfe00707f +#define MATCH_KMABT 0x6a001077 +#define MASK_KMABT 0xfe00707f +#define MATCH_KMATT 0x7a001077 +#define MASK_KMATT 0xfe00707f +#define MATCH_KMADA 0x48001077 +#define MASK_KMADA 0xfe00707f +#define MATCH_KMAXDA 0x4a001077 +#define MASK_KMAXDA 0xfe00707f +#define MATCH_KMADS 0x5c001077 +#define MASK_KMADS 0xfe00707f +#define MATCH_KMADRS 0x6c001077 +#define MASK_KMADRS 0xfe00707f +#define MATCH_KMAXDS 0x7c001077 +#define MASK_KMAXDS 0xfe00707f +#define MATCH_KMAR64 0x94001077 +#define MASK_KMAR64 0xfe00707f +#define MATCH_KMDA 0x38001077 +#define MASK_KMDA 0xfe00707f +#define MATCH_KMXDA 0x3a001077 +#define MASK_KMXDA 0xfe00707f +#define MATCH_KMMAC 0x60001077 +#define MASK_KMMAC 0xfe00707f +#define MATCH_KMMAC_U 0x70001077 +#define MASK_KMMAC_U 0xfe00707f +#define MATCH_KMMAWB 0x46001077 +#define MASK_KMMAWB 0xfe00707f +#define MATCH_KMMAWB_U 0x56001077 +#define MASK_KMMAWB_U 0xfe00707f +#define MATCH_KMMAWB2 0xce001077 +#define MASK_KMMAWB2 0xfe00707f +#define MATCH_KMMAWB2_U 0xde001077 +#define MASK_KMMAWB2_U 0xfe00707f +#define MATCH_KMMAWT 0x66001077 +#define MASK_KMMAWT 0xfe00707f +#define MATCH_KMMAWT_U 0x76001077 
+#define MASK_KMMAWT_U 0xfe00707f +#define MATCH_KMMAWT2 0xee001077 +#define MASK_KMMAWT2 0xfe00707f +#define MATCH_KMMAWT2_U 0xfe001077 +#define MASK_KMMAWT2_U 0xfe00707f +#define MATCH_KMMSB 0x42001077 +#define MASK_KMMSB 0xfe00707f +#define MATCH_KMMSB_U 0x52001077 +#define MASK_KMMSB_U 0xfe00707f +#define MATCH_KMMWB2 0x8e001077 +#define MASK_KMMWB2 0xfe00707f +#define MATCH_KMMWB2_U 0x9e001077 +#define MASK_KMMWB2_U 0xfe00707f +#define MATCH_KMMWT2 0xae001077 +#define MASK_KMMWT2 0xfe00707f +#define MATCH_KMMWT2_U 0xbe001077 +#define MASK_KMMWT2_U 0xfe00707f +#define MATCH_KMSDA 0x4c001077 +#define MASK_KMSDA 0xfe00707f +#define MATCH_KMSXDA 0x4e001077 +#define MASK_KMSXDA 0xfe00707f +#define MATCH_KMSR64 0x96001077 +#define MASK_KMSR64 0xfe00707f +#define MATCH_KSLLW 0x26001077 +#define MASK_KSLLW 0xfe00707f +#define MATCH_KSLLIW 0x36001077 +#define MASK_KSLLIW 0xfe00707f +#define MATCH_KSLL8 0x6c000077 +#define MASK_KSLL8 0xfe00707f +#define MATCH_KSLLI8 0x7c800077 +#define MASK_KSLLI8 0xff80707f +#define MATCH_KSLL16 0x64000077 +#define MASK_KSLL16 0xfe00707f +#define MATCH_KSLLI16 0x75000077 +#define MASK_KSLLI16 0xff00707f +#define MATCH_KSLRA8 0x5e000077 +#define MASK_KSLRA8 0xfe00707f +#define MATCH_KSLRA8_U 0x6e000077 +#define MASK_KSLRA8_U 0xfe00707f +#define MATCH_KSLRA16 0x56000077 +#define MASK_KSLRA16 0xfe00707f +#define MATCH_KSLRA16_U 0x66000077 +#define MASK_KSLRA16_U 0xfe00707f +#define MATCH_KSLRAW 0x6e001077 +#define MASK_KSLRAW 0xfe00707f +#define MATCH_KSLRAW_U 0x7e001077 +#define MASK_KSLRAW_U 0xfe00707f +#define MATCH_KSTAS16 0xc4002077 +#define MASK_KSTAS16 0xfe00707f +#define MATCH_KSTSA16 0xc6002077 +#define MASK_KSTSA16 0xfe00707f +#define MATCH_KSUB8 0x1a000077 +#define MASK_KSUB8 0xfe00707f +#define MATCH_KSUB16 0x12000077 +#define MASK_KSUB16 0xfe00707f +#define MATCH_KSUB64 0x92001077 +#define MASK_KSUB64 0xfe00707f +#define MATCH_KSUBH 0x6001077 +#define MASK_KSUBH 0xfe00707f +#define MATCH_KSUBW 0x2001077 +#define MASK_KSUBW 0xfe00707f +#define MATCH_KWMMUL 0x62001077 +#define MASK_KWMMUL 0xfe00707f +#define MATCH_KWMMUL_U 0x72001077 +#define MASK_KWMMUL_U 0xfe00707f +#define MATCH_MADDR32 0xc4001077 +#define MASK_MADDR32 0xfe00707f +#define MATCH_MAXW 0xf2000077 +#define MASK_MAXW 0xfe00707f +#define MATCH_MINW 0xf0000077 +#define MASK_MINW 0xfe00707f +#define MATCH_MSUBR32 0xc6001077 +#define MASK_MSUBR32 0xfe00707f +#define MATCH_MULR64 0xf0001077 +#define MASK_MULR64 0xfe00707f +#define MATCH_MULSR64 0xe0001077 +#define MASK_MULSR64 0xfe00707f +#define MATCH_PBSAD 0xfc000077 +#define MASK_PBSAD 0xfe00707f +#define MATCH_PBSADA 0xfe000077 +#define MASK_PBSADA 0xfe00707f +#define MATCH_PKBB16 0xe001077 +#define MASK_PKBB16 0xfe00707f +#define MATCH_PKBT16 0x1e001077 +#define MASK_PKBT16 0xfe00707f +#define MATCH_PKTT16 0x2e001077 +#define MASK_PKTT16 0xfe00707f +#define MATCH_PKTB16 0x3e001077 +#define MASK_PKTB16 0xfe00707f +#define MATCH_RADD8 0x8000077 +#define MASK_RADD8 0xfe00707f +#define MATCH_RADD16 0x77 +#define MASK_RADD16 0xfe00707f +#define MATCH_RADD64 0x80001077 +#define MASK_RADD64 0xfe00707f +#define MATCH_RADDW 0x20001077 +#define MASK_RADDW 0xfe00707f +#define MATCH_RCRAS16 0x4000077 +#define MASK_RCRAS16 0xfe00707f +#define MATCH_RCRSA16 0x6000077 +#define MASK_RCRSA16 0xfe00707f +#define MATCH_RSTAS16 0xb4002077 +#define MASK_RSTAS16 0xfe00707f +#define MATCH_RSTSA16 0xb6002077 +#define MASK_RSTSA16 0xfe00707f +#define MATCH_RSUB8 0xa000077 +#define MASK_RSUB8 0xfe00707f +#define MATCH_RSUB16 0x2000077 +#define MASK_RSUB16 
0xfe00707f +#define MATCH_RSUB64 0x82001077 +#define MASK_RSUB64 0xfe00707f +#define MATCH_RSUBW 0x22001077 +#define MASK_RSUBW 0xfe00707f +#define MATCH_SCLIP8 0x8c000077 +#define MASK_SCLIP8 0xff80707f +#define MATCH_SCLIP16 0x84000077 +#define MASK_SCLIP16 0xff00707f +#define MATCH_SCLIP32 0xe4000077 +#define MASK_SCLIP32 0xfe00707f +#define MATCH_SCMPLE8 0x1e000077 +#define MASK_SCMPLE8 0xfe00707f +#define MATCH_SCMPLE16 0x1c000077 +#define MASK_SCMPLE16 0xfe00707f +#define MATCH_SCMPLT8 0xe000077 +#define MASK_SCMPLT8 0xfe00707f +#define MATCH_SCMPLT16 0xc000077 +#define MASK_SCMPLT16 0xfe00707f +#define MATCH_SLL8 0x5c000077 +#define MASK_SLL8 0xfe00707f +#define MATCH_SLLI8 0x7c000077 +#define MASK_SLLI8 0xff80707f +#define MATCH_SLL16 0x54000077 +#define MASK_SLL16 0xfe00707f +#define MATCH_SLLI16 0x74000077 +#define MASK_SLLI16 0xff00707f +#define MATCH_SMAL 0x5e001077 +#define MASK_SMAL 0xfe00707f +#define MATCH_SMALBB 0x88001077 +#define MASK_SMALBB 0xfe00707f +#define MATCH_SMALBT 0x98001077 +#define MASK_SMALBT 0xfe00707f +#define MATCH_SMALTT 0xa8001077 +#define MASK_SMALTT 0xfe00707f +#define MATCH_SMALDA 0x8c001077 +#define MASK_SMALDA 0xfe00707f +#define MATCH_SMALXDA 0x9c001077 +#define MASK_SMALXDA 0xfe00707f +#define MATCH_SMALDS 0x8a001077 +#define MASK_SMALDS 0xfe00707f +#define MATCH_SMALDRS 0x9a001077 +#define MASK_SMALDRS 0xfe00707f +#define MATCH_SMALXDS 0xaa001077 +#define MASK_SMALXDS 0xfe00707f +#define MATCH_SMAR64 0x84001077 +#define MASK_SMAR64 0xfe00707f +#define MATCH_SMAQA 0xc8000077 +#define MASK_SMAQA 0xfe00707f +#define MATCH_SMAQA_SU 0xca000077 +#define MASK_SMAQA_SU 0xfe00707f +#define MATCH_SMAX8 0x8a000077 +#define MASK_SMAX8 0xfe00707f +#define MATCH_SMAX16 0x82000077 +#define MASK_SMAX16 0xfe00707f +#define MATCH_SMBB16 0x8001077 +#define MASK_SMBB16 0xfe00707f +#define MATCH_SMBT16 0x18001077 +#define MASK_SMBT16 0xfe00707f +#define MATCH_SMTT16 0x28001077 +#define MASK_SMTT16 0xfe00707f +#define MATCH_SMDS 0x58001077 +#define MASK_SMDS 0xfe00707f +#define MATCH_SMDRS 0x68001077 +#define MASK_SMDRS 0xfe00707f +#define MATCH_SMXDS 0x78001077 +#define MASK_SMXDS 0xfe00707f +#define MATCH_SMIN8 0x88000077 +#define MASK_SMIN8 0xfe00707f +#define MATCH_SMIN16 0x80000077 +#define MASK_SMIN16 0xfe00707f +#define MATCH_SMMUL 0x40001077 +#define MASK_SMMUL 0xfe00707f +#define MATCH_SMMUL_U 0x50001077 +#define MASK_SMMUL_U 0xfe00707f +#define MATCH_SMMWB 0x44001077 +#define MASK_SMMWB 0xfe00707f +#define MATCH_SMMWB_U 0x54001077 +#define MASK_SMMWB_U 0xfe00707f +#define MATCH_SMMWT 0x64001077 +#define MASK_SMMWT 0xfe00707f +#define MATCH_SMMWT_U 0x74001077 +#define MASK_SMMWT_U 0xfe00707f +#define MATCH_SMSLDA 0xac001077 +#define MASK_SMSLDA 0xfe00707f +#define MATCH_SMSLXDA 0xbc001077 +#define MASK_SMSLXDA 0xfe00707f +#define MATCH_SMSR64 0x86001077 +#define MASK_SMSR64 0xfe00707f +#define MATCH_SMUL8 0xa8000077 +#define MASK_SMUL8 0xfe00707f +#define MATCH_SMULX8 0xaa000077 +#define MASK_SMULX8 0xfe00707f +#define MATCH_SMUL16 0xa0000077 +#define MASK_SMUL16 0xfe00707f +#define MATCH_SMULX16 0xa2000077 +#define MASK_SMULX16 0xfe00707f +#define MATCH_SRA_U 0x24001077 +#define MASK_SRA_U 0xfe00707f +#define MATCH_SRAI_U 0xd4001077 +#define MASK_SRAI_U 0xfc00707f +#define MATCH_SRA8 0x58000077 +#define MASK_SRA8 0xfe00707f +#define MATCH_SRA8_U 0x68000077 +#define MASK_SRA8_U 0xfe00707f +#define MATCH_SRAI8 0x78000077 +#define MASK_SRAI8 0xff80707f +#define MATCH_SRAI8_U 0x78800077 +#define MASK_SRAI8_U 0xff80707f +#define MATCH_SRA16 0x50000077 +#define 
MASK_SRA16 0xfe00707f +#define MATCH_SRA16_U 0x60000077 +#define MASK_SRA16_U 0xfe00707f +#define MATCH_SRAI16 0x70000077 +#define MASK_SRAI16 0xff00707f +#define MATCH_SRAI16_U 0x71000077 +#define MASK_SRAI16_U 0xff00707f +#define MATCH_SRL8 0x5a000077 +#define MASK_SRL8 0xfe00707f +#define MATCH_SRL8_U 0x6a000077 +#define MASK_SRL8_U 0xfe00707f +#define MATCH_SRLI8 0x7a000077 +#define MASK_SRLI8 0xff80707f +#define MATCH_SRLI8_U 0x7a800077 +#define MASK_SRLI8_U 0xff80707f +#define MATCH_SRL16 0x52000077 +#define MASK_SRL16 0xfe00707f +#define MATCH_SRL16_U 0x62000077 +#define MASK_SRL16_U 0xfe00707f +#define MATCH_SRLI16 0x72000077 +#define MASK_SRLI16 0xff00707f +#define MATCH_SRLI16_U 0x73000077 +#define MASK_SRLI16_U 0xff00707f +#define MATCH_STAS16 0xf4002077 +#define MASK_STAS16 0xfe00707f +#define MATCH_STSA16 0xf6002077 +#define MASK_STSA16 0xfe00707f +#define MATCH_SUB8 0x4a000077 +#define MASK_SUB8 0xfe00707f +#define MATCH_SUB16 0x42000077 +#define MASK_SUB16 0xfe00707f +#define MATCH_SUB64 0xc2001077 +#define MASK_SUB64 0xfe00707f +#define MATCH_SUNPKD810 0xac800077 +#define MASK_SUNPKD810 0xfff0707f +#define MATCH_SUNPKD820 0xac900077 +#define MASK_SUNPKD820 0xfff0707f +#define MATCH_SUNPKD830 0xaca00077 +#define MASK_SUNPKD830 0xfff0707f +#define MATCH_SUNPKD831 0xacb00077 +#define MASK_SUNPKD831 0xfff0707f +#define MATCH_SUNPKD832 0xad300077 +#define MASK_SUNPKD832 0xfff0707f +#define MATCH_SWAP8 0xad800077 +#define MASK_SWAP8 0xfff0707f +#define MATCH_UCLIP8 0x8d000077 +#define MASK_UCLIP8 0xff80707f +#define MATCH_UCLIP16 0x85000077 +#define MASK_UCLIP16 0xff00707f +#define MATCH_UCLIP32 0xf4000077 +#define MASK_UCLIP32 0xfe00707f +#define MATCH_UCMPLE8 0x3e000077 +#define MASK_UCMPLE8 0xfe00707f +#define MATCH_UCMPLE16 0x3c000077 +#define MASK_UCMPLE16 0xfe00707f +#define MATCH_UCMPLT8 0x2e000077 +#define MASK_UCMPLT8 0xfe00707f +#define MATCH_UCMPLT16 0x2c000077 +#define MASK_UCMPLT16 0xfe00707f +#define MATCH_UKADD8 0x38000077 +#define MASK_UKADD8 0xfe00707f +#define MATCH_UKADD16 0x30000077 +#define MASK_UKADD16 0xfe00707f +#define MATCH_UKADD64 0xb0001077 +#define MASK_UKADD64 0xfe00707f +#define MATCH_UKADDH 0x14001077 +#define MASK_UKADDH 0xfe00707f +#define MATCH_UKADDW 0x10001077 +#define MASK_UKADDW 0xfe00707f +#define MATCH_UKCRAS16 0x34000077 +#define MASK_UKCRAS16 0xfe00707f +#define MATCH_UKCRSA16 0x36000077 +#define MASK_UKCRSA16 0xfe00707f +#define MATCH_UKMAR64 0xb4001077 +#define MASK_UKMAR64 0xfe00707f +#define MATCH_UKMSR64 0xb6001077 +#define MASK_UKMSR64 0xfe00707f +#define MATCH_UKSTAS16 0xe4002077 +#define MASK_UKSTAS16 0xfe00707f +#define MATCH_UKSTSA16 0xe6002077 +#define MASK_UKSTSA16 0xfe00707f +#define MATCH_UKSUB8 0x3a000077 +#define MASK_UKSUB8 0xfe00707f +#define MATCH_UKSUB16 0x32000077 +#define MASK_UKSUB16 0xfe00707f +#define MATCH_UKSUB64 0xb2001077 +#define MASK_UKSUB64 0xfe00707f +#define MATCH_UKSUBH 0x16001077 +#define MASK_UKSUBH 0xfe00707f +#define MATCH_UKSUBW 0x12001077 +#define MASK_UKSUBW 0xfe00707f +#define MATCH_UMAR64 0xa4001077 +#define MASK_UMAR64 0xfe00707f +#define MATCH_UMAQA 0xcc000077 +#define MASK_UMAQA 0xfe00707f +#define MATCH_UMAX8 0x9a000077 +#define MASK_UMAX8 0xfe00707f +#define MATCH_UMAX16 0x92000077 +#define MASK_UMAX16 0xfe00707f +#define MATCH_UMIN8 0x98000077 +#define MASK_UMIN8 0xfe00707f +#define MATCH_UMIN16 0x90000077 +#define MASK_UMIN16 0xfe00707f +#define MATCH_UMSR64 0xa6001077 +#define MASK_UMSR64 0xfe00707f +#define MATCH_UMUL8 0xb8000077 +#define MASK_UMUL8 0xfe00707f +#define MATCH_UMULX8 
0xba000077 +#define MASK_UMULX8 0xfe00707f +#define MATCH_UMUL16 0xb0000077 +#define MASK_UMUL16 0xfe00707f +#define MATCH_UMULX16 0xb2000077 +#define MASK_UMULX16 0xfe00707f +#define MATCH_URADD8 0x28000077 +#define MASK_URADD8 0xfe00707f +#define MATCH_URADD16 0x20000077 +#define MASK_URADD16 0xfe00707f +#define MATCH_URADD64 0xa0001077 +#define MASK_URADD64 0xfe00707f +#define MATCH_URADDW 0x30001077 +#define MASK_URADDW 0xfe00707f +#define MATCH_URCRAS16 0x24000077 +#define MASK_URCRAS16 0xfe00707f +#define MATCH_URCRSA16 0x26000077 +#define MASK_URCRSA16 0xfe00707f +#define MATCH_URSTAS16 0xd4002077 +#define MASK_URSTAS16 0xfe00707f +#define MATCH_URSTSA16 0xd6002077 +#define MASK_URSTSA16 0xfe00707f +#define MATCH_URSUB8 0x2a000077 +#define MASK_URSUB8 0xfe00707f +#define MATCH_URSUB16 0x22000077 +#define MASK_URSUB16 0xfe00707f +#define MATCH_URSUB64 0xa2001077 +#define MASK_URSUB64 0xfe00707f +#define MATCH_URSUBW 0x32001077 +#define MASK_URSUBW 0xfe00707f +#define MATCH_WEXTI 0xde000077 +#define MASK_WEXTI 0xfe00707f +#define MATCH_WEXT 0xce000077 +#define MASK_WEXT 0xfe00707f +#define MATCH_ZUNPKD810 0xacc00077 +#define MASK_ZUNPKD810 0xfff0707f +#define MATCH_ZUNPKD820 0xacd00077 +#define MASK_ZUNPKD820 0xfff0707f +#define MATCH_ZUNPKD830 0xace00077 +#define MASK_ZUNPKD830 0xfff0707f +#define MATCH_ZUNPKD831 0xacf00077 +#define MASK_ZUNPKD831 0xfff0707f +#define MATCH_ZUNPKD832 0xad700077 +#define MASK_ZUNPKD832 0xfff0707f +#define MATCH_ADD32 0x40002077 +#define MASK_ADD32 0xfe00707f +#define MATCH_CRAS32 0x44002077 +#define MASK_CRAS32 0xfe00707f +#define MATCH_CRSA32 0x46002077 +#define MASK_CRSA32 0xfe00707f +#define MATCH_KABS32 0xad200077 +#define MASK_KABS32 0xfff0707f +#define MATCH_KADD32 0x10002077 +#define MASK_KADD32 0xfe00707f +#define MATCH_KCRAS32 0x14002077 +#define MASK_KCRAS32 0xfe00707f +#define MATCH_KCRSA32 0x16002077 +#define MASK_KCRSA32 0xfe00707f +#define MATCH_KDMBB16 0xda001077 +#define MASK_KDMBB16 0xfe00707f +#define MATCH_KDMBT16 0xea001077 +#define MASK_KDMBT16 0xfe00707f +#define MATCH_KDMTT16 0xfa001077 +#define MASK_KDMTT16 0xfe00707f +#define MATCH_KDMABB16 0xd8001077 +#define MASK_KDMABB16 0xfe00707f +#define MATCH_KDMABT16 0xe8001077 +#define MASK_KDMABT16 0xfe00707f +#define MATCH_KDMATT16 0xf8001077 +#define MASK_KDMATT16 0xfe00707f +#define MATCH_KHMBB16 0xdc001077 +#define MASK_KHMBB16 0xfe00707f +#define MATCH_KHMBT16 0xec001077 +#define MASK_KHMBT16 0xfe00707f +#define MATCH_KHMTT16 0xfc001077 +#define MASK_KHMTT16 0xfe00707f +#define MATCH_KMABB32 0x5a002077 +#define MASK_KMABB32 0xfe00707f +#define MATCH_KMABT32 0x6a002077 +#define MASK_KMABT32 0xfe00707f +#define MATCH_KMATT32 0x7a002077 +#define MASK_KMATT32 0xfe00707f +#define MATCH_KMAXDA32 0x4a002077 +#define MASK_KMAXDA32 0xfe00707f +#define MATCH_KMDA32 0x38002077 +#define MASK_KMDA32 0xfe00707f +#define MATCH_KMXDA32 0x3a002077 +#define MASK_KMXDA32 0xfe00707f +#define MATCH_KMADS32 0x5c002077 +#define MASK_KMADS32 0xfe00707f +#define MATCH_KMADRS32 0x6c002077 +#define MASK_KMADRS32 0xfe00707f +#define MATCH_KMAXDS32 0x7c002077 +#define MASK_KMAXDS32 0xfe00707f +#define MATCH_KMSDA32 0x4c002077 +#define MASK_KMSDA32 0xfe00707f +#define MATCH_KMSXDA32 0x4e002077 +#define MASK_KMSXDA32 0xfe00707f +#define MATCH_KSLL32 0x64002077 +#define MASK_KSLL32 0xfe00707f +#define MATCH_KSLLI32 0x84002077 +#define MASK_KSLLI32 0xfe00707f +#define MATCH_KSLRA32 0x56002077 +#define MASK_KSLRA32 0xfe00707f +#define MATCH_KSLRA32_U 0x66002077 +#define MASK_KSLRA32_U 0xfe00707f +#define 
MATCH_KSTAS32 0xc0002077 +#define MASK_KSTAS32 0xfe00707f +#define MATCH_KSTSA32 0xc2002077 +#define MASK_KSTSA32 0xfe00707f +#define MATCH_KSUB32 0x12002077 +#define MASK_KSUB32 0xfe00707f +#define MATCH_PKBB32 0xe002077 +#define MASK_PKBB32 0xfe00707f +#define MATCH_PKBT32 0x1e002077 +#define MASK_PKBT32 0xfe00707f +#define MATCH_PKTT32 0x2e002077 +#define MASK_PKTT32 0xfe00707f +#define MATCH_PKTB32 0x3e002077 +#define MASK_PKTB32 0xfe00707f +#define MATCH_RADD32 0x2077 +#define MASK_RADD32 0xfe00707f +#define MATCH_RCRAS32 0x4002077 +#define MASK_RCRAS32 0xfe00707f +#define MATCH_RCRSA32 0x6002077 +#define MASK_RCRSA32 0xfe00707f +#define MATCH_RSTAS32 0xb0002077 +#define MASK_RSTAS32 0xfe00707f +#define MATCH_RSTSA32 0xb2002077 +#define MASK_RSTSA32 0xfe00707f +#define MATCH_RSUB32 0x2002077 +#define MASK_RSUB32 0xfe00707f +#define MATCH_SLL32 0x54002077 +#define MASK_SLL32 0xfe00707f +#define MATCH_SLLI32 0x74002077 +#define MASK_SLLI32 0xfe00707f +#define MATCH_SMAX32 0x92002077 +#define MASK_SMAX32 0xfe00707f +#define MATCH_SMBT32 0x18002077 +#define MASK_SMBT32 0xfe00707f +#define MATCH_SMTT32 0x28002077 +#define MASK_SMTT32 0xfe00707f +#define MATCH_SMDS32 0x58002077 +#define MASK_SMDS32 0xfe00707f +#define MATCH_SMDRS32 0x68002077 +#define MASK_SMDRS32 0xfe00707f +#define MATCH_SMXDS32 0x78002077 +#define MASK_SMXDS32 0xfe00707f +#define MATCH_SMIN32 0x90002077 +#define MASK_SMIN32 0xfe00707f +#define MATCH_SRA32 0x50002077 +#define MASK_SRA32 0xfe00707f +#define MATCH_SRA32_U 0x60002077 +#define MASK_SRA32_U 0xfe00707f +#define MATCH_SRAI32 0x70002077 +#define MASK_SRAI32 0xfe00707f +#define MATCH_SRAI32_U 0x80002077 +#define MASK_SRAI32_U 0xfe00707f +#define MATCH_SRAIW_U 0x34001077 +#define MASK_SRAIW_U 0xfe00707f +#define MATCH_SRL32 0x52002077 +#define MASK_SRL32 0xfe00707f +#define MATCH_SRL32_U 0x62002077 +#define MASK_SRL32_U 0xfe00707f +#define MATCH_SRLI32 0x72002077 +#define MASK_SRLI32 0xfe00707f +#define MATCH_SRLI32_U 0x82002077 +#define MASK_SRLI32_U 0xfe00707f +#define MATCH_STAS32 0xf0002077 +#define MASK_STAS32 0xfe00707f +#define MATCH_STSA32 0xf2002077 +#define MASK_STSA32 0xfe00707f +#define MATCH_SUB32 0x42002077 +#define MASK_SUB32 0xfe00707f +#define MATCH_UKADD32 0x30002077 +#define MASK_UKADD32 0xfe00707f +#define MATCH_UKCRAS32 0x34002077 +#define MASK_UKCRAS32 0xfe00707f +#define MATCH_UKCRSA32 0x36002077 +#define MASK_UKCRSA32 0xfe00707f +#define MATCH_UKSTAS32 0xe0002077 +#define MASK_UKSTAS32 0xfe00707f +#define MATCH_UKSTSA32 0xe2002077 +#define MASK_UKSTSA32 0xfe00707f +#define MATCH_UKSUB32 0x32002077 +#define MASK_UKSUB32 0xfe00707f +#define MATCH_UMAX32 0xa2002077 +#define MASK_UMAX32 0xfe00707f +#define MATCH_UMIN32 0xa0002077 +#define MASK_UMIN32 0xfe00707f +#define MATCH_URADD32 0x20002077 +#define MASK_URADD32 0xfe00707f +#define MATCH_URCRAS32 0x24002077 +#define MASK_URCRAS32 0xfe00707f +#define MATCH_URCRSA32 0x26002077 +#define MASK_URCRSA32 0xfe00707f +#define MATCH_URSTAS32 0xd0002077 +#define MASK_URSTAS32 0xfe00707f +#define MATCH_URSTSA32 0xd2002077 +#define MASK_URSTSA32 0xfe00707f +#define MATCH_URSUB32 0x22002077 +#define MASK_URSUB32 0xfe00707f +#define MATCH_VMVNFR_V 0x9e003057 +#define MASK_VMVNFR_V 0xfe00707f +#define MATCH_VL1R_V 0x2800007 +#define MASK_VL1R_V 0xfff0707f +#define MATCH_VL2R_V 0x6805007 +#define MASK_VL2R_V 0xfff0707f +#define MATCH_VL4R_V 0xe806007 +#define MASK_VL4R_V 0xfff0707f +#define MATCH_VL8R_V 0x1e807007 +#define MASK_VL8R_V 0xfff0707f +#define MATCH_VLE1_V 0x2b00007 +#define MASK_VLE1_V 0xfff0707f 
+#define MATCH_VSE1_V 0x2b00027 +#define MASK_VSE1_V 0xfff0707f +#define MATCH_VFREDSUM_VS 0x4001057 +#define MASK_VFREDSUM_VS 0xfc00707f +#define MATCH_VFWREDSUM_VS 0xc4001057 +#define MASK_VFWREDSUM_VS 0xfc00707f +#define MATCH_VPOPC_M 0x40082057 +#define MASK_VPOPC_M 0xfc0ff07f +#define MATCH_VMORNOT_MM 0x70002057 +#define MASK_VMORNOT_MM 0xfc00707f +#define MATCH_VMANDNOT_MM 0x60002057 +#define MASK_VMANDNOT_MM 0xfc00707f +#define CSR_FFLAGS 0x1 +#define CSR_FRM 0x2 +#define CSR_FCSR 0x3 +#define CSR_VSTART 0x8 +#define CSR_VXSAT 0x9 +#define CSR_VXRM 0xa +#define CSR_VCSR 0xf +#define CSR_SEED 0x15 +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_VL 0xc20 +#define CSR_VTYPE 0xc21 +#define CSR_VLENB 0xc22 +#define CSR_SSTATUS 0x100 +#define CSR_SEDELEG 0x102 +#define CSR_SIDELEG 0x103 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SENVCFG 0x10a +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_SATP 0x180 +#define CSR_SCONTEXT 0x5a8 +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGEIE 0x607 +#define CSR_HENVCFG 0x60a +#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HVIP 0x645 +#define CSR_HTINST 0x64a +#define CSR_HGATP 0x680 +#define CSR_HCONTEXT 0x6a8 +#define CSR_HGEIP 0xe12 +#define CSR_UTVT 0x7 +#define CSR_UNXTI 0x45 +#define CSR_UINTSTATUS 0x46 +#define CSR_USCRATCHCSW 0x48 +#define CSR_USCRATCHCSWL 0x49 +#define CSR_STVT 0x107 +#define CSR_SNXTI 0x145 +#define CSR_SINTSTATUS 0x146 +#define CSR_SSCRATCHCSW 0x148 +#define CSR_SSCRATCHCSWL 0x149 +#define CSR_MTVT 0x307 +#define CSR_MNXTI 0x345 +#define CSR_MINTSTATUS 0x346 +#define CSR_MSCRATCHCSW 0x348 +#define CSR_MSCRATCHCSWL 0x349 +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MEDELEG 0x302 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MCOUNTEREN 0x306 +#define CSR_MENVCFG 0x30a +#define CSR_MCOUNTINHIBIT 0x320 +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_MTINST 0x34a +#define CSR_MTVAL2 0x34b +#define CSR_PMPCFG0 0x3a0 +#define 
CSR_PMPCFG1 0x3a1 +#define CSR_PMPCFG2 0x3a2 +#define CSR_PMPCFG3 0x3a3 +#define CSR_PMPCFG4 0x3a4 +#define CSR_PMPCFG5 0x3a5 +#define CSR_PMPCFG6 0x3a6 +#define CSR_PMPCFG7 0x3a7 +#define CSR_PMPCFG8 0x3a8 +#define CSR_PMPCFG9 0x3a9 +#define CSR_PMPCFG10 0x3aa +#define CSR_PMPCFG11 0x3ab +#define CSR_PMPCFG12 0x3ac +#define CSR_PMPCFG13 0x3ad +#define CSR_PMPCFG14 0x3ae +#define CSR_PMPCFG15 0x3af +#define CSR_PMPADDR0 0x3b0 +#define CSR_PMPADDR1 0x3b1 +#define CSR_PMPADDR2 0x3b2 +#define CSR_PMPADDR3 0x3b3 +#define CSR_PMPADDR4 0x3b4 +#define CSR_PMPADDR5 0x3b5 +#define CSR_PMPADDR6 0x3b6 +#define CSR_PMPADDR7 0x3b7 +#define CSR_PMPADDR8 0x3b8 +#define CSR_PMPADDR9 0x3b9 +#define CSR_PMPADDR10 0x3ba +#define CSR_PMPADDR11 0x3bb +#define CSR_PMPADDR12 0x3bc +#define CSR_PMPADDR13 0x3bd +#define CSR_PMPADDR14 0x3be +#define CSR_PMPADDR15 0x3bf +#define CSR_PMPADDR16 0x3c0 +#define CSR_PMPADDR17 0x3c1 +#define CSR_PMPADDR18 0x3c2 +#define CSR_PMPADDR19 0x3c3 +#define CSR_PMPADDR20 0x3c4 +#define CSR_PMPADDR21 0x3c5 +#define CSR_PMPADDR22 0x3c6 +#define CSR_PMPADDR23 0x3c7 +#define CSR_PMPADDR24 0x3c8 +#define CSR_PMPADDR25 0x3c9 +#define CSR_PMPADDR26 0x3ca +#define CSR_PMPADDR27 0x3cb +#define CSR_PMPADDR28 0x3cc +#define CSR_PMPADDR29 0x3cd +#define CSR_PMPADDR30 0x3ce +#define CSR_PMPADDR31 0x3cf +#define CSR_PMPADDR32 0x3d0 +#define CSR_PMPADDR33 0x3d1 +#define CSR_PMPADDR34 0x3d2 +#define CSR_PMPADDR35 0x3d3 +#define CSR_PMPADDR36 0x3d4 +#define CSR_PMPADDR37 0x3d5 +#define CSR_PMPADDR38 0x3d6 +#define CSR_PMPADDR39 0x3d7 +#define CSR_PMPADDR40 0x3d8 +#define CSR_PMPADDR41 0x3d9 +#define CSR_PMPADDR42 0x3da +#define CSR_PMPADDR43 0x3db +#define CSR_PMPADDR44 0x3dc +#define CSR_PMPADDR45 0x3dd +#define CSR_PMPADDR46 0x3de +#define CSR_PMPADDR47 0x3df +#define CSR_PMPADDR48 0x3e0 +#define CSR_PMPADDR49 0x3e1 +#define CSR_PMPADDR50 0x3e2 +#define CSR_PMPADDR51 0x3e3 +#define CSR_PMPADDR52 0x3e4 +#define CSR_PMPADDR53 0x3e5 +#define CSR_PMPADDR54 0x3e6 +#define CSR_PMPADDR55 0x3e7 +#define CSR_PMPADDR56 0x3e8 +#define CSR_PMPADDR57 0x3e9 +#define CSR_PMPADDR58 0x3ea +#define CSR_PMPADDR59 0x3eb +#define CSR_PMPADDR60 0x3ec +#define CSR_PMPADDR61 0x3ed +#define CSR_PMPADDR62 0x3ee +#define CSR_PMPADDR63 0x3ef +#define CSR_MSECCFG 0x747 +#define CSR_TSELECT 0x7a0 +#define CSR_TDATA1 0x7a1 +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA3 0x7a3 +#define CSR_TINFO 0x7a4 +#define CSR_TCONTROL 0x7a5 +#define CSR_MCONTEXT 0x7a8 +#define CSR_MSCONTEXT 0x7aa +#define CSR_DCSR 0x7b0 +#define CSR_DPC 0x7b1 +#define CSR_DSCRATCH0 0x7b2 +#define CSR_DSCRATCH1 0x7b3 +#define CSR_MCYCLE 0xb00 +#define CSR_MINSTRET 0xb02 +#define CSR_MHPMCOUNTER3 0xb03 +#define CSR_MHPMCOUNTER4 0xb04 +#define CSR_MHPMCOUNTER5 0xb05 +#define CSR_MHPMCOUNTER6 0xb06 +#define CSR_MHPMCOUNTER7 0xb07 +#define CSR_MHPMCOUNTER8 0xb08 +#define CSR_MHPMCOUNTER9 0xb09 +#define CSR_MHPMCOUNTER10 0xb0a +#define CSR_MHPMCOUNTER11 0xb0b +#define CSR_MHPMCOUNTER12 0xb0c +#define CSR_MHPMCOUNTER13 0xb0d +#define CSR_MHPMCOUNTER14 0xb0e +#define CSR_MHPMCOUNTER15 0xb0f +#define CSR_MHPMCOUNTER16 0xb10 +#define CSR_MHPMCOUNTER17 0xb11 +#define CSR_MHPMCOUNTER18 0xb12 +#define CSR_MHPMCOUNTER19 0xb13 +#define CSR_MHPMCOUNTER20 0xb14 +#define CSR_MHPMCOUNTER21 0xb15 +#define CSR_MHPMCOUNTER22 0xb16 +#define CSR_MHPMCOUNTER23 0xb17 +#define CSR_MHPMCOUNTER24 0xb18 +#define CSR_MHPMCOUNTER25 0xb19 +#define CSR_MHPMCOUNTER26 0xb1a +#define CSR_MHPMCOUNTER27 0xb1b +#define CSR_MHPMCOUNTER28 0xb1c +#define CSR_MHPMCOUNTER29 0xb1d +#define 
CSR_MHPMCOUNTER30 0xb1e +#define CSR_MHPMCOUNTER31 0xb1f +#define CSR_MHPMEVENT3 0x323 +#define CSR_MHPMEVENT4 0x324 +#define CSR_MHPMEVENT5 0x325 +#define CSR_MHPMEVENT6 0x326 +#define CSR_MHPMEVENT7 0x327 +#define CSR_MHPMEVENT8 0x328 +#define CSR_MHPMEVENT9 0x329 +#define CSR_MHPMEVENT10 0x32a +#define CSR_MHPMEVENT11 0x32b +#define CSR_MHPMEVENT12 0x32c +#define CSR_MHPMEVENT13 0x32d +#define CSR_MHPMEVENT14 0x32e +#define CSR_MHPMEVENT15 0x32f +#define CSR_MHPMEVENT16 0x330 +#define CSR_MHPMEVENT17 0x331 +#define CSR_MHPMEVENT18 0x332 +#define CSR_MHPMEVENT19 0x333 +#define CSR_MHPMEVENT20 0x334 +#define CSR_MHPMEVENT21 0x335 +#define CSR_MHPMEVENT22 0x336 +#define CSR_MHPMEVENT23 0x337 +#define CSR_MHPMEVENT24 0x338 +#define CSR_MHPMEVENT25 0x339 +#define CSR_MHPMEVENT26 0x33a +#define CSR_MHPMEVENT27 0x33b +#define CSR_MHPMEVENT28 0x33c +#define CSR_MHPMEVENT29 0x33d +#define CSR_MHPMEVENT30 0x33e +#define CSR_MHPMEVENT31 0x33f +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 +#define CSR_MCONFIGPTR 0xf15 +#define CSR_HTIMEDELTAH 0x615 +#define CSR_HENVCFGH 0x61a +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f +#define CSR_MSTATUSH 0x310 +#define CSR_MENVCFGH 0x31a +#define CSR_MSECCFGH 0x757 +#define CSR_MCYCLEH 0xb80 +#define CSR_MINSTRETH 0xb82 +#define CSR_MHPMCOUNTER3H 0xb83 +#define CSR_MHPMCOUNTER4H 0xb84 +#define CSR_MHPMCOUNTER5H 0xb85 +#define CSR_MHPMCOUNTER6H 0xb86 +#define CSR_MHPMCOUNTER7H 0xb87 +#define CSR_MHPMCOUNTER8H 0xb88 +#define CSR_MHPMCOUNTER9H 0xb89 +#define CSR_MHPMCOUNTER10H 0xb8a +#define CSR_MHPMCOUNTER11H 0xb8b +#define CSR_MHPMCOUNTER12H 0xb8c +#define CSR_MHPMCOUNTER13H 0xb8d +#define CSR_MHPMCOUNTER14H 0xb8e +#define CSR_MHPMCOUNTER15H 0xb8f +#define CSR_MHPMCOUNTER16H 0xb90 +#define CSR_MHPMCOUNTER17H 0xb91 +#define CSR_MHPMCOUNTER18H 0xb92 +#define CSR_MHPMCOUNTER19H 0xb93 +#define CSR_MHPMCOUNTER20H 0xb94 +#define CSR_MHPMCOUNTER21H 0xb95 +#define CSR_MHPMCOUNTER22H 0xb96 +#define CSR_MHPMCOUNTER23H 0xb97 +#define CSR_MHPMCOUNTER24H 0xb98 +#define CSR_MHPMCOUNTER25H 0xb99 +#define CSR_MHPMCOUNTER26H 0xb9a +#define CSR_MHPMCOUNTER27H 0xb9b +#define CSR_MHPMCOUNTER28H 0xb9c +#define CSR_MHPMCOUNTER29H 0xb9d +#define CSR_MHPMCOUNTER30H 0xb9e +#define CSR_MHPMCOUNTER31H 0xb9f +#define CAUSE_MISALIGNED_FETCH 0x0 +#define CAUSE_FETCH_ACCESS 0x1 +#define CAUSE_ILLEGAL_INSTRUCTION 0x2 +#define CAUSE_BREAKPOINT 0x3 +#define CAUSE_MISALIGNED_LOAD 0x4 +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_MISALIGNED_STORE 0x6 +#define 
CAUSE_STORE_ACCESS 0x7 +#define CAUSE_USER_ECALL 0x8 +#define CAUSE_SUPERVISOR_ECALL 0x9 +#define CAUSE_VIRTUAL_SUPERVISOR_ECALL 0xa +#define CAUSE_MACHINE_ECALL 0xb +#define CAUSE_FETCH_PAGE_FAULT 0xc +#define CAUSE_LOAD_PAGE_FAULT 0xd +#define CAUSE_STORE_PAGE_FAULT 0xf +#define CAUSE_FETCH_GUEST_PAGE_FAULT 0x14 +#define CAUSE_LOAD_GUEST_PAGE_FAULT 0x15 +#define CAUSE_VIRTUAL_INSTRUCTION 0x16 +#define CAUSE_STORE_GUEST_PAGE_FAULT 0x17 +#endif +#ifdef DECLARE_INSN +DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32) +DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32) +DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32) +DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS) +DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS) +DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI) +DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM) +DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM) +DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI) +DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR) +DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR) +DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE) +DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME) +DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET) +DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH) +DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH) +DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH) +DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL) +DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK) +DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S) +DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X) +DECLARE_INSN(fence_tso, MATCH_FENCE_TSO, MASK_FENCE_TSO) +DECLARE_INSN(pause, MATCH_PAUSE, MASK_PAUSE) +DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ) +DECLARE_INSN(bne, MATCH_BNE, MASK_BNE) +DECLARE_INSN(blt, MATCH_BLT, MASK_BLT) +DECLARE_INSN(bge, MATCH_BGE, MASK_BGE) +DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU) +DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU) +DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR) +DECLARE_INSN(jal, MATCH_JAL, MASK_JAL) +DECLARE_INSN(lui, MATCH_LUI, MASK_LUI) +DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC) +DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI) +DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI) +DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU) +DECLARE_INSN(xori, MATCH_XORI, MASK_XORI) +DECLARE_INSN(ori, MATCH_ORI, MASK_ORI) +DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI) +DECLARE_INSN(add, MATCH_ADD, MASK_ADD) +DECLARE_INSN(sub, MATCH_SUB, MASK_SUB) +DECLARE_INSN(sll, MATCH_SLL, MASK_SLL) +DECLARE_INSN(slt, MATCH_SLT, MASK_SLT) +DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU) +DECLARE_INSN(xor, MATCH_XOR, MASK_XOR) +DECLARE_INSN(srl, MATCH_SRL, MASK_SRL) +DECLARE_INSN(sra, MATCH_SRA, MASK_SRA) +DECLARE_INSN(or, MATCH_OR, MASK_OR) +DECLARE_INSN(and, MATCH_AND, MASK_AND) +DECLARE_INSN(lb, MATCH_LB, MASK_LB) +DECLARE_INSN(lh, MATCH_LH, MASK_LH) +DECLARE_INSN(lw, MATCH_LW, MASK_LW) +DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU) +DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU) +DECLARE_INSN(sb, MATCH_SB, MASK_SB) +DECLARE_INSN(sh, MATCH_SH, MASK_SH) +DECLARE_INSN(sw, MATCH_SW, MASK_SW) +DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE) +DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I) +DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW) +DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW) +DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW) +DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW) +DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW) +DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW) +DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW) +DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW) +DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW) +DECLARE_INSN(ld, 
MATCH_LD, MASK_LD) +DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU) +DECLARE_INSN(sd, MATCH_SD, MASK_SD) +DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI) +DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI) +DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI) +DECLARE_INSN(mul, MATCH_MUL, MASK_MUL) +DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH) +DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU) +DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU) +DECLARE_INSN(div, MATCH_DIV, MASK_DIV) +DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU) +DECLARE_INSN(rem, MATCH_REM, MASK_REM) +DECLARE_INSN(remu, MATCH_REMU, MASK_REMU) +DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW) +DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW) +DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW) +DECLARE_INSN(remw, MATCH_REMW, MASK_REMW) +DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW) +DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W) +DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W) +DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W) +DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W) +DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W) +DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W) +DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W) +DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W) +DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W) +DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W) +DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W) +DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D) +DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D) +DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D) +DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D) +DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D) +DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D) +DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D) +DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D) +DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D) +DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D) +DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D) +DECLARE_INSN(hfence_vvma, MATCH_HFENCE_VVMA, MASK_HFENCE_VVMA) +DECLARE_INSN(hfence_gvma, MATCH_HFENCE_GVMA, MASK_HFENCE_GVMA) +DECLARE_INSN(hlv_b, MATCH_HLV_B, MASK_HLV_B) +DECLARE_INSN(hlv_bu, MATCH_HLV_BU, MASK_HLV_BU) +DECLARE_INSN(hlv_h, MATCH_HLV_H, MASK_HLV_H) +DECLARE_INSN(hlv_hu, MATCH_HLV_HU, MASK_HLV_HU) +DECLARE_INSN(hlvx_hu, MATCH_HLVX_HU, MASK_HLVX_HU) +DECLARE_INSN(hlv_w, MATCH_HLV_W, MASK_HLV_W) +DECLARE_INSN(hlvx_wu, MATCH_HLVX_WU, MASK_HLVX_WU) +DECLARE_INSN(hsv_b, MATCH_HSV_B, MASK_HSV_B) +DECLARE_INSN(hsv_h, MATCH_HSV_H, MASK_HSV_H) +DECLARE_INSN(hsv_w, MATCH_HSV_W, MASK_HSV_W) +DECLARE_INSN(hlv_wu, MATCH_HLV_WU, MASK_HLV_WU) +DECLARE_INSN(hlv_d, MATCH_HLV_D, MASK_HLV_D) +DECLARE_INSN(hsv_d, MATCH_HSV_D, MASK_HSV_D) +DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S) +DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S) +DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S) +DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S) +DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S) +DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S) +DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S) +DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S) +DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S) +DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S) +DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S) +DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S) +DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S) +DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S) +DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S) +DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W) +DECLARE_INSN(fclass_s, 
MATCH_FCLASS_S, MASK_FCLASS_S) +DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W) +DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU) +DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X) +DECLARE_INSN(flw, MATCH_FLW, MASK_FLW) +DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW) +DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S) +DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S) +DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S) +DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S) +DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S) +DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S) +DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L) +DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU) +DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D) +DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D) +DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D) +DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D) +DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D) +DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D) +DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D) +DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D) +DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D) +DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D) +DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S) +DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D) +DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D) +DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D) +DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D) +DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D) +DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D) +DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D) +DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W) +DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU) +DECLARE_INSN(fld, MATCH_FLD, MASK_FLD) +DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD) +DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D) +DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D) +DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D) +DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D) +DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D) +DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D) +DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D) +DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L) +DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU) +DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X) +DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q) +DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q) +DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q) +DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q) +DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q) +DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q) +DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q) +DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q) +DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q) +DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q) +DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S) +DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q) +DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D) +DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q) +DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q) +DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q) +DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q) +DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q) +DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q) +DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q) +DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W) +DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU) +DECLARE_INSN(flq, MATCH_FLQ, 
MASK_FLQ) +DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ) +DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q) +DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q) +DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q) +DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q) +DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q) +DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q) +DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L) +DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU) +DECLARE_INSN(andn, MATCH_ANDN, MASK_ANDN) +DECLARE_INSN(orn, MATCH_ORN, MASK_ORN) +DECLARE_INSN(xnor, MATCH_XNOR, MASK_XNOR) +DECLARE_INSN(slo, MATCH_SLO, MASK_SLO) +DECLARE_INSN(sro, MATCH_SRO, MASK_SRO) +DECLARE_INSN(rol, MATCH_ROL, MASK_ROL) +DECLARE_INSN(ror, MATCH_ROR, MASK_ROR) +DECLARE_INSN(bclr, MATCH_BCLR, MASK_BCLR) +DECLARE_INSN(bset, MATCH_BSET, MASK_BSET) +DECLARE_INSN(binv, MATCH_BINV, MASK_BINV) +DECLARE_INSN(bext, MATCH_BEXT, MASK_BEXT) +DECLARE_INSN(gorc, MATCH_GORC, MASK_GORC) +DECLARE_INSN(grev, MATCH_GREV, MASK_GREV) +DECLARE_INSN(sloi, MATCH_SLOI, MASK_SLOI) +DECLARE_INSN(sroi, MATCH_SROI, MASK_SROI) +DECLARE_INSN(rori, MATCH_RORI, MASK_RORI) +DECLARE_INSN(bclri, MATCH_BCLRI, MASK_BCLRI) +DECLARE_INSN(bseti, MATCH_BSETI, MASK_BSETI) +DECLARE_INSN(binvi, MATCH_BINVI, MASK_BINVI) +DECLARE_INSN(bexti, MATCH_BEXTI, MASK_BEXTI) +DECLARE_INSN(gorci, MATCH_GORCI, MASK_GORCI) +DECLARE_INSN(grevi, MATCH_GREVI, MASK_GREVI) +DECLARE_INSN(cmix, MATCH_CMIX, MASK_CMIX) +DECLARE_INSN(cmov, MATCH_CMOV, MASK_CMOV) +DECLARE_INSN(fsl, MATCH_FSL, MASK_FSL) +DECLARE_INSN(fsr, MATCH_FSR, MASK_FSR) +DECLARE_INSN(fsri, MATCH_FSRI, MASK_FSRI) +DECLARE_INSN(clz, MATCH_CLZ, MASK_CLZ) +DECLARE_INSN(ctz, MATCH_CTZ, MASK_CTZ) +DECLARE_INSN(cpop, MATCH_CPOP, MASK_CPOP) +DECLARE_INSN(sext_b, MATCH_SEXT_B, MASK_SEXT_B) +DECLARE_INSN(sext_h, MATCH_SEXT_H, MASK_SEXT_H) +DECLARE_INSN(crc32_b, MATCH_CRC32_B, MASK_CRC32_B) +DECLARE_INSN(crc32_h, MATCH_CRC32_H, MASK_CRC32_H) +DECLARE_INSN(crc32_w, MATCH_CRC32_W, MASK_CRC32_W) +DECLARE_INSN(crc32c_b, MATCH_CRC32C_B, MASK_CRC32C_B) +DECLARE_INSN(crc32c_h, MATCH_CRC32C_H, MASK_CRC32C_H) +DECLARE_INSN(crc32c_w, MATCH_CRC32C_W, MASK_CRC32C_W) +DECLARE_INSN(sh1add, MATCH_SH1ADD, MASK_SH1ADD) +DECLARE_INSN(sh2add, MATCH_SH2ADD, MASK_SH2ADD) +DECLARE_INSN(sh3add, MATCH_SH3ADD, MASK_SH3ADD) +DECLARE_INSN(clmul, MATCH_CLMUL, MASK_CLMUL) +DECLARE_INSN(clmulr, MATCH_CLMULR, MASK_CLMULR) +DECLARE_INSN(clmulh, MATCH_CLMULH, MASK_CLMULH) +DECLARE_INSN(min, MATCH_MIN, MASK_MIN) +DECLARE_INSN(minu, MATCH_MINU, MASK_MINU) +DECLARE_INSN(max, MATCH_MAX, MASK_MAX) +DECLARE_INSN(maxu, MATCH_MAXU, MASK_MAXU) +DECLARE_INSN(shfl, MATCH_SHFL, MASK_SHFL) +DECLARE_INSN(unshfl, MATCH_UNSHFL, MASK_UNSHFL) +DECLARE_INSN(bcompress, MATCH_BCOMPRESS, MASK_BCOMPRESS) +DECLARE_INSN(bdecompress, MATCH_BDECOMPRESS, MASK_BDECOMPRESS) +DECLARE_INSN(pack, MATCH_PACK, MASK_PACK) +DECLARE_INSN(packu, MATCH_PACKU, MASK_PACKU) +DECLARE_INSN(packh, MATCH_PACKH, MASK_PACKH) +DECLARE_INSN(bfp, MATCH_BFP, MASK_BFP) +DECLARE_INSN(shfli, MATCH_SHFLI, MASK_SHFLI) +DECLARE_INSN(unshfli, MATCH_UNSHFLI, MASK_UNSHFLI) +DECLARE_INSN(xperm4, MATCH_XPERM4, MASK_XPERM4) +DECLARE_INSN(xperm8, MATCH_XPERM8, MASK_XPERM8) +DECLARE_INSN(xperm16, MATCH_XPERM16, MASK_XPERM16) +DECLARE_INSN(bmatflip, MATCH_BMATFLIP, MASK_BMATFLIP) +DECLARE_INSN(crc32_d, MATCH_CRC32_D, MASK_CRC32_D) +DECLARE_INSN(crc32c_d, MATCH_CRC32C_D, MASK_CRC32C_D) +DECLARE_INSN(bmator, MATCH_BMATOR, MASK_BMATOR) +DECLARE_INSN(bmatxor, MATCH_BMATXOR, MASK_BMATXOR) 
+DECLARE_INSN(slli_uw, MATCH_SLLI_UW, MASK_SLLI_UW) +DECLARE_INSN(add_uw, MATCH_ADD_UW, MASK_ADD_UW) +DECLARE_INSN(slow, MATCH_SLOW, MASK_SLOW) +DECLARE_INSN(srow, MATCH_SROW, MASK_SROW) +DECLARE_INSN(rolw, MATCH_ROLW, MASK_ROLW) +DECLARE_INSN(rorw, MATCH_RORW, MASK_RORW) +DECLARE_INSN(gorcw, MATCH_GORCW, MASK_GORCW) +DECLARE_INSN(grevw, MATCH_GREVW, MASK_GREVW) +DECLARE_INSN(sloiw, MATCH_SLOIW, MASK_SLOIW) +DECLARE_INSN(sroiw, MATCH_SROIW, MASK_SROIW) +DECLARE_INSN(roriw, MATCH_RORIW, MASK_RORIW) +DECLARE_INSN(gorciw, MATCH_GORCIW, MASK_GORCIW) +DECLARE_INSN(greviw, MATCH_GREVIW, MASK_GREVIW) +DECLARE_INSN(fslw, MATCH_FSLW, MASK_FSLW) +DECLARE_INSN(fsrw, MATCH_FSRW, MASK_FSRW) +DECLARE_INSN(fsriw, MATCH_FSRIW, MASK_FSRIW) +DECLARE_INSN(clzw, MATCH_CLZW, MASK_CLZW) +DECLARE_INSN(ctzw, MATCH_CTZW, MASK_CTZW) +DECLARE_INSN(cpopw, MATCH_CPOPW, MASK_CPOPW) +DECLARE_INSN(sh1add_uw, MATCH_SH1ADD_UW, MASK_SH1ADD_UW) +DECLARE_INSN(sh2add_uw, MATCH_SH2ADD_UW, MASK_SH2ADD_UW) +DECLARE_INSN(sh3add_uw, MATCH_SH3ADD_UW, MASK_SH3ADD_UW) +DECLARE_INSN(shflw, MATCH_SHFLW, MASK_SHFLW) +DECLARE_INSN(unshflw, MATCH_UNSHFLW, MASK_UNSHFLW) +DECLARE_INSN(bcompressw, MATCH_BCOMPRESSW, MASK_BCOMPRESSW) +DECLARE_INSN(bdecompressw, MATCH_BDECOMPRESSW, MASK_BDECOMPRESSW) +DECLARE_INSN(packw, MATCH_PACKW, MASK_PACKW) +DECLARE_INSN(packuw, MATCH_PACKUW, MASK_PACKUW) +DECLARE_INSN(bfpw, MATCH_BFPW, MASK_BFPW) +DECLARE_INSN(xperm32, MATCH_XPERM32, MASK_XPERM32) +DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL) +DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK) +DECLARE_INSN(sret, MATCH_SRET, MASK_SRET) +DECLARE_INSN(mret, MATCH_MRET, MASK_MRET) +DECLARE_INSN(dret, MATCH_DRET, MASK_DRET) +DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA) +DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI) +DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW) +DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS) +DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC) +DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI) +DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI) +DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI) +DECLARE_INSN(sinval_vma, MATCH_SINVAL_VMA, MASK_SINVAL_VMA) +DECLARE_INSN(sfence_w_inval, MATCH_SFENCE_W_INVAL, MASK_SFENCE_W_INVAL) +DECLARE_INSN(sfence_inval_ir, MATCH_SFENCE_INVAL_IR, MASK_SFENCE_INVAL_IR) +DECLARE_INSN(hinval_vvma, MATCH_HINVAL_VVMA, MASK_HINVAL_VVMA) +DECLARE_INSN(hinval_gvma, MATCH_HINVAL_GVMA, MASK_HINVAL_GVMA) +DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H) +DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H) +DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H) +DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H) +DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H) +DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H) +DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H) +DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H) +DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H) +DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S) +DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H) +DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H) +DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H) +DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H) +DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H) +DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H) +DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H) +DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H) +DECLARE_INSN(fclass_h, MATCH_FCLASS_H, MASK_FCLASS_H) +DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W) +DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU) +DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X) 
+DECLARE_INSN(flh, MATCH_FLH, MASK_FLH) +DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH) +DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H) +DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H) +DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H) +DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H) +DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D) +DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H) +DECLARE_INSN(fcvt_h_q, MATCH_FCVT_H_Q, MASK_FCVT_H_Q) +DECLARE_INSN(fcvt_q_h, MATCH_FCVT_Q_H, MASK_FCVT_Q_H) +DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H) +DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H) +DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L) +DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU) +DECLARE_INSN(sm4ed, MATCH_SM4ED, MASK_SM4ED) +DECLARE_INSN(sm4ks, MATCH_SM4KS, MASK_SM4KS) +DECLARE_INSN(sm3p0, MATCH_SM3P0, MASK_SM3P0) +DECLARE_INSN(sm3p1, MATCH_SM3P1, MASK_SM3P1) +DECLARE_INSN(sha256sum0, MATCH_SHA256SUM0, MASK_SHA256SUM0) +DECLARE_INSN(sha256sum1, MATCH_SHA256SUM1, MASK_SHA256SUM1) +DECLARE_INSN(sha256sig0, MATCH_SHA256SIG0, MASK_SHA256SIG0) +DECLARE_INSN(sha256sig1, MATCH_SHA256SIG1, MASK_SHA256SIG1) +DECLARE_INSN(aes32esmi, MATCH_AES32ESMI, MASK_AES32ESMI) +DECLARE_INSN(aes32esi, MATCH_AES32ESI, MASK_AES32ESI) +DECLARE_INSN(aes32dsmi, MATCH_AES32DSMI, MASK_AES32DSMI) +DECLARE_INSN(aes32dsi, MATCH_AES32DSI, MASK_AES32DSI) +DECLARE_INSN(sha512sum0r, MATCH_SHA512SUM0R, MASK_SHA512SUM0R) +DECLARE_INSN(sha512sum1r, MATCH_SHA512SUM1R, MASK_SHA512SUM1R) +DECLARE_INSN(sha512sig0l, MATCH_SHA512SIG0L, MASK_SHA512SIG0L) +DECLARE_INSN(sha512sig0h, MATCH_SHA512SIG0H, MASK_SHA512SIG0H) +DECLARE_INSN(sha512sig1l, MATCH_SHA512SIG1L, MASK_SHA512SIG1L) +DECLARE_INSN(sha512sig1h, MATCH_SHA512SIG1H, MASK_SHA512SIG1H) +DECLARE_INSN(aes64ks1i, MATCH_AES64KS1I, MASK_AES64KS1I) +DECLARE_INSN(aes64im, MATCH_AES64IM, MASK_AES64IM) +DECLARE_INSN(aes64ks2, MATCH_AES64KS2, MASK_AES64KS2) +DECLARE_INSN(aes64esm, MATCH_AES64ESM, MASK_AES64ESM) +DECLARE_INSN(aes64es, MATCH_AES64ES, MASK_AES64ES) +DECLARE_INSN(aes64dsm, MATCH_AES64DSM, MASK_AES64DSM) +DECLARE_INSN(aes64ds, MATCH_AES64DS, MASK_AES64DS) +DECLARE_INSN(sha512sum0, MATCH_SHA512SUM0, MASK_SHA512SUM0) +DECLARE_INSN(sha512sum1, MATCH_SHA512SUM1, MASK_SHA512SUM1) +DECLARE_INSN(sha512sig0, MATCH_SHA512SIG0, MASK_SHA512SIG0) +DECLARE_INSN(sha512sig1, MATCH_SHA512SIG1, MASK_SHA512SIG1) +DECLARE_INSN(cbo_clean, MATCH_CBO_CLEAN, MASK_CBO_CLEAN) +DECLARE_INSN(cbo_flush, MATCH_CBO_FLUSH, MASK_CBO_FLUSH) +DECLARE_INSN(cbo_inval, MATCH_CBO_INVAL, MASK_CBO_INVAL) +DECLARE_INSN(cbo_zero, MATCH_CBO_ZERO, MASK_CBO_ZERO) +DECLARE_INSN(prefetch_i, MATCH_PREFETCH_I, MASK_PREFETCH_I) +DECLARE_INSN(prefetch_r, MATCH_PREFETCH_R, MASK_PREFETCH_R) +DECLARE_INSN(prefetch_w, MATCH_PREFETCH_W, MASK_PREFETCH_W) +DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP) +DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP) +DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR) +DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR) +DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK) +DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN) +DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD) +DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW) +DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW) +DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD) +DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW) +DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW) +DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI) +DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL) +DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI) 
+DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI) +DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI) +DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI) +DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI) +DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB) +DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR) +DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR) +DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND) +DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J) +DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ) +DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ) +DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI) +DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP) +DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP) +DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP) +DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV) +DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD) +DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP) +DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP) +DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP) +DECLARE_INSN(c_srli_rv32, MATCH_C_SRLI_RV32, MASK_C_SRLI_RV32) +DECLARE_INSN(c_srai_rv32, MATCH_C_SRAI_RV32, MASK_C_SRAI_RV32) +DECLARE_INSN(c_slli_rv32, MATCH_C_SLLI_RV32, MASK_C_SLLI_RV32) +DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD) +DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD) +DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW) +DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW) +DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW) +DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP) +DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP) +DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0) +DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1) +DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2) +DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD) +DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1) +DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2) +DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1) +DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1) +DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2) +DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD) +DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1) +DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2) +DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2) +DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1) +DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2) +DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD) +DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1) +DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2) +DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3) +DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1) +DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2) +DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD) +DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1) +DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2) +DECLARE_INSN(vsetivli, MATCH_VSETIVLI, MASK_VSETIVLI) +DECLARE_INSN(vsetvli, MATCH_VSETVLI, MASK_VSETVLI) +DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL) +DECLARE_INSN(vlm_v, MATCH_VLM_V, MASK_VLM_V) +DECLARE_INSN(vsm_v, MATCH_VSM_V, MASK_VSM_V) +DECLARE_INSN(vle8_v, MATCH_VLE8_V, MASK_VLE8_V) +DECLARE_INSN(vle16_v, MATCH_VLE16_V, MASK_VLE16_V) +DECLARE_INSN(vle32_v, MATCH_VLE32_V, MASK_VLE32_V) +DECLARE_INSN(vle64_v, 
MATCH_VLE64_V, MASK_VLE64_V) +DECLARE_INSN(vle128_v, MATCH_VLE128_V, MASK_VLE128_V) +DECLARE_INSN(vle256_v, MATCH_VLE256_V, MASK_VLE256_V) +DECLARE_INSN(vle512_v, MATCH_VLE512_V, MASK_VLE512_V) +DECLARE_INSN(vle1024_v, MATCH_VLE1024_V, MASK_VLE1024_V) +DECLARE_INSN(vse8_v, MATCH_VSE8_V, MASK_VSE8_V) +DECLARE_INSN(vse16_v, MATCH_VSE16_V, MASK_VSE16_V) +DECLARE_INSN(vse32_v, MATCH_VSE32_V, MASK_VSE32_V) +DECLARE_INSN(vse64_v, MATCH_VSE64_V, MASK_VSE64_V) +DECLARE_INSN(vse128_v, MATCH_VSE128_V, MASK_VSE128_V) +DECLARE_INSN(vse256_v, MATCH_VSE256_V, MASK_VSE256_V) +DECLARE_INSN(vse512_v, MATCH_VSE512_V, MASK_VSE512_V) +DECLARE_INSN(vse1024_v, MATCH_VSE1024_V, MASK_VSE1024_V) +DECLARE_INSN(vluxei8_v, MATCH_VLUXEI8_V, MASK_VLUXEI8_V) +DECLARE_INSN(vluxei16_v, MATCH_VLUXEI16_V, MASK_VLUXEI16_V) +DECLARE_INSN(vluxei32_v, MATCH_VLUXEI32_V, MASK_VLUXEI32_V) +DECLARE_INSN(vluxei64_v, MATCH_VLUXEI64_V, MASK_VLUXEI64_V) +DECLARE_INSN(vluxei128_v, MATCH_VLUXEI128_V, MASK_VLUXEI128_V) +DECLARE_INSN(vluxei256_v, MATCH_VLUXEI256_V, MASK_VLUXEI256_V) +DECLARE_INSN(vluxei512_v, MATCH_VLUXEI512_V, MASK_VLUXEI512_V) +DECLARE_INSN(vluxei1024_v, MATCH_VLUXEI1024_V, MASK_VLUXEI1024_V) +DECLARE_INSN(vsuxei8_v, MATCH_VSUXEI8_V, MASK_VSUXEI8_V) +DECLARE_INSN(vsuxei16_v, MATCH_VSUXEI16_V, MASK_VSUXEI16_V) +DECLARE_INSN(vsuxei32_v, MATCH_VSUXEI32_V, MASK_VSUXEI32_V) +DECLARE_INSN(vsuxei64_v, MATCH_VSUXEI64_V, MASK_VSUXEI64_V) +DECLARE_INSN(vsuxei128_v, MATCH_VSUXEI128_V, MASK_VSUXEI128_V) +DECLARE_INSN(vsuxei256_v, MATCH_VSUXEI256_V, MASK_VSUXEI256_V) +DECLARE_INSN(vsuxei512_v, MATCH_VSUXEI512_V, MASK_VSUXEI512_V) +DECLARE_INSN(vsuxei1024_v, MATCH_VSUXEI1024_V, MASK_VSUXEI1024_V) +DECLARE_INSN(vlse8_v, MATCH_VLSE8_V, MASK_VLSE8_V) +DECLARE_INSN(vlse16_v, MATCH_VLSE16_V, MASK_VLSE16_V) +DECLARE_INSN(vlse32_v, MATCH_VLSE32_V, MASK_VLSE32_V) +DECLARE_INSN(vlse64_v, MATCH_VLSE64_V, MASK_VLSE64_V) +DECLARE_INSN(vlse128_v, MATCH_VLSE128_V, MASK_VLSE128_V) +DECLARE_INSN(vlse256_v, MATCH_VLSE256_V, MASK_VLSE256_V) +DECLARE_INSN(vlse512_v, MATCH_VLSE512_V, MASK_VLSE512_V) +DECLARE_INSN(vlse1024_v, MATCH_VLSE1024_V, MASK_VLSE1024_V) +DECLARE_INSN(vsse8_v, MATCH_VSSE8_V, MASK_VSSE8_V) +DECLARE_INSN(vsse16_v, MATCH_VSSE16_V, MASK_VSSE16_V) +DECLARE_INSN(vsse32_v, MATCH_VSSE32_V, MASK_VSSE32_V) +DECLARE_INSN(vsse64_v, MATCH_VSSE64_V, MASK_VSSE64_V) +DECLARE_INSN(vsse128_v, MATCH_VSSE128_V, MASK_VSSE128_V) +DECLARE_INSN(vsse256_v, MATCH_VSSE256_V, MASK_VSSE256_V) +DECLARE_INSN(vsse512_v, MATCH_VSSE512_V, MASK_VSSE512_V) +DECLARE_INSN(vsse1024_v, MATCH_VSSE1024_V, MASK_VSSE1024_V) +DECLARE_INSN(vloxei8_v, MATCH_VLOXEI8_V, MASK_VLOXEI8_V) +DECLARE_INSN(vloxei16_v, MATCH_VLOXEI16_V, MASK_VLOXEI16_V) +DECLARE_INSN(vloxei32_v, MATCH_VLOXEI32_V, MASK_VLOXEI32_V) +DECLARE_INSN(vloxei64_v, MATCH_VLOXEI64_V, MASK_VLOXEI64_V) +DECLARE_INSN(vloxei128_v, MATCH_VLOXEI128_V, MASK_VLOXEI128_V) +DECLARE_INSN(vloxei256_v, MATCH_VLOXEI256_V, MASK_VLOXEI256_V) +DECLARE_INSN(vloxei512_v, MATCH_VLOXEI512_V, MASK_VLOXEI512_V) +DECLARE_INSN(vloxei1024_v, MATCH_VLOXEI1024_V, MASK_VLOXEI1024_V) +DECLARE_INSN(vsoxei8_v, MATCH_VSOXEI8_V, MASK_VSOXEI8_V) +DECLARE_INSN(vsoxei16_v, MATCH_VSOXEI16_V, MASK_VSOXEI16_V) +DECLARE_INSN(vsoxei32_v, MATCH_VSOXEI32_V, MASK_VSOXEI32_V) +DECLARE_INSN(vsoxei64_v, MATCH_VSOXEI64_V, MASK_VSOXEI64_V) +DECLARE_INSN(vsoxei128_v, MATCH_VSOXEI128_V, MASK_VSOXEI128_V) +DECLARE_INSN(vsoxei256_v, MATCH_VSOXEI256_V, MASK_VSOXEI256_V) +DECLARE_INSN(vsoxei512_v, MATCH_VSOXEI512_V, MASK_VSOXEI512_V) +DECLARE_INSN(vsoxei1024_v, 
MATCH_VSOXEI1024_V, MASK_VSOXEI1024_V) +DECLARE_INSN(vle8ff_v, MATCH_VLE8FF_V, MASK_VLE8FF_V) +DECLARE_INSN(vle16ff_v, MATCH_VLE16FF_V, MASK_VLE16FF_V) +DECLARE_INSN(vle32ff_v, MATCH_VLE32FF_V, MASK_VLE32FF_V) +DECLARE_INSN(vle64ff_v, MATCH_VLE64FF_V, MASK_VLE64FF_V) +DECLARE_INSN(vle128ff_v, MATCH_VLE128FF_V, MASK_VLE128FF_V) +DECLARE_INSN(vle256ff_v, MATCH_VLE256FF_V, MASK_VLE256FF_V) +DECLARE_INSN(vle512ff_v, MATCH_VLE512FF_V, MASK_VLE512FF_V) +DECLARE_INSN(vle1024ff_v, MATCH_VLE1024FF_V, MASK_VLE1024FF_V) +DECLARE_INSN(vl1re8_v, MATCH_VL1RE8_V, MASK_VL1RE8_V) +DECLARE_INSN(vl1re16_v, MATCH_VL1RE16_V, MASK_VL1RE16_V) +DECLARE_INSN(vl1re32_v, MATCH_VL1RE32_V, MASK_VL1RE32_V) +DECLARE_INSN(vl1re64_v, MATCH_VL1RE64_V, MASK_VL1RE64_V) +DECLARE_INSN(vl2re8_v, MATCH_VL2RE8_V, MASK_VL2RE8_V) +DECLARE_INSN(vl2re16_v, MATCH_VL2RE16_V, MASK_VL2RE16_V) +DECLARE_INSN(vl2re32_v, MATCH_VL2RE32_V, MASK_VL2RE32_V) +DECLARE_INSN(vl2re64_v, MATCH_VL2RE64_V, MASK_VL2RE64_V) +DECLARE_INSN(vl4re8_v, MATCH_VL4RE8_V, MASK_VL4RE8_V) +DECLARE_INSN(vl4re16_v, MATCH_VL4RE16_V, MASK_VL4RE16_V) +DECLARE_INSN(vl4re32_v, MATCH_VL4RE32_V, MASK_VL4RE32_V) +DECLARE_INSN(vl4re64_v, MATCH_VL4RE64_V, MASK_VL4RE64_V) +DECLARE_INSN(vl8re8_v, MATCH_VL8RE8_V, MASK_VL8RE8_V) +DECLARE_INSN(vl8re16_v, MATCH_VL8RE16_V, MASK_VL8RE16_V) +DECLARE_INSN(vl8re32_v, MATCH_VL8RE32_V, MASK_VL8RE32_V) +DECLARE_INSN(vl8re64_v, MATCH_VL8RE64_V, MASK_VL8RE64_V) +DECLARE_INSN(vs1r_v, MATCH_VS1R_V, MASK_VS1R_V) +DECLARE_INSN(vs2r_v, MATCH_VS2R_V, MASK_VS2R_V) +DECLARE_INSN(vs4r_v, MATCH_VS4R_V, MASK_VS4R_V) +DECLARE_INSN(vs8r_v, MATCH_VS8R_V, MASK_VS8R_V) +DECLARE_INSN(vfadd_vf, MATCH_VFADD_VF, MASK_VFADD_VF) +DECLARE_INSN(vfsub_vf, MATCH_VFSUB_VF, MASK_VFSUB_VF) +DECLARE_INSN(vfmin_vf, MATCH_VFMIN_VF, MASK_VFMIN_VF) +DECLARE_INSN(vfmax_vf, MATCH_VFMAX_VF, MASK_VFMAX_VF) +DECLARE_INSN(vfsgnj_vf, MATCH_VFSGNJ_VF, MASK_VFSGNJ_VF) +DECLARE_INSN(vfsgnjn_vf, MATCH_VFSGNJN_VF, MASK_VFSGNJN_VF) +DECLARE_INSN(vfsgnjx_vf, MATCH_VFSGNJX_VF, MASK_VFSGNJX_VF) +DECLARE_INSN(vfslide1up_vf, MATCH_VFSLIDE1UP_VF, MASK_VFSLIDE1UP_VF) +DECLARE_INSN(vfslide1down_vf, MATCH_VFSLIDE1DOWN_VF, MASK_VFSLIDE1DOWN_VF) +DECLARE_INSN(vfmv_s_f, MATCH_VFMV_S_F, MASK_VFMV_S_F) +DECLARE_INSN(vfmerge_vfm, MATCH_VFMERGE_VFM, MASK_VFMERGE_VFM) +DECLARE_INSN(vfmv_v_f, MATCH_VFMV_V_F, MASK_VFMV_V_F) +DECLARE_INSN(vmfeq_vf, MATCH_VMFEQ_VF, MASK_VMFEQ_VF) +DECLARE_INSN(vmfle_vf, MATCH_VMFLE_VF, MASK_VMFLE_VF) +DECLARE_INSN(vmflt_vf, MATCH_VMFLT_VF, MASK_VMFLT_VF) +DECLARE_INSN(vmfne_vf, MATCH_VMFNE_VF, MASK_VMFNE_VF) +DECLARE_INSN(vmfgt_vf, MATCH_VMFGT_VF, MASK_VMFGT_VF) +DECLARE_INSN(vmfge_vf, MATCH_VMFGE_VF, MASK_VMFGE_VF) +DECLARE_INSN(vfdiv_vf, MATCH_VFDIV_VF, MASK_VFDIV_VF) +DECLARE_INSN(vfrdiv_vf, MATCH_VFRDIV_VF, MASK_VFRDIV_VF) +DECLARE_INSN(vfmul_vf, MATCH_VFMUL_VF, MASK_VFMUL_VF) +DECLARE_INSN(vfrsub_vf, MATCH_VFRSUB_VF, MASK_VFRSUB_VF) +DECLARE_INSN(vfmadd_vf, MATCH_VFMADD_VF, MASK_VFMADD_VF) +DECLARE_INSN(vfnmadd_vf, MATCH_VFNMADD_VF, MASK_VFNMADD_VF) +DECLARE_INSN(vfmsub_vf, MATCH_VFMSUB_VF, MASK_VFMSUB_VF) +DECLARE_INSN(vfnmsub_vf, MATCH_VFNMSUB_VF, MASK_VFNMSUB_VF) +DECLARE_INSN(vfmacc_vf, MATCH_VFMACC_VF, MASK_VFMACC_VF) +DECLARE_INSN(vfnmacc_vf, MATCH_VFNMACC_VF, MASK_VFNMACC_VF) +DECLARE_INSN(vfmsac_vf, MATCH_VFMSAC_VF, MASK_VFMSAC_VF) +DECLARE_INSN(vfnmsac_vf, MATCH_VFNMSAC_VF, MASK_VFNMSAC_VF) +DECLARE_INSN(vfwadd_vf, MATCH_VFWADD_VF, MASK_VFWADD_VF) +DECLARE_INSN(vfwsub_vf, MATCH_VFWSUB_VF, MASK_VFWSUB_VF) +DECLARE_INSN(vfwadd_wf, MATCH_VFWADD_WF, MASK_VFWADD_WF) 
+DECLARE_INSN(vfwsub_wf, MATCH_VFWSUB_WF, MASK_VFWSUB_WF) +DECLARE_INSN(vfwmul_vf, MATCH_VFWMUL_VF, MASK_VFWMUL_VF) +DECLARE_INSN(vfwmacc_vf, MATCH_VFWMACC_VF, MASK_VFWMACC_VF) +DECLARE_INSN(vfwnmacc_vf, MATCH_VFWNMACC_VF, MASK_VFWNMACC_VF) +DECLARE_INSN(vfwmsac_vf, MATCH_VFWMSAC_VF, MASK_VFWMSAC_VF) +DECLARE_INSN(vfwnmsac_vf, MATCH_VFWNMSAC_VF, MASK_VFWNMSAC_VF) +DECLARE_INSN(vfadd_vv, MATCH_VFADD_VV, MASK_VFADD_VV) +DECLARE_INSN(vfredusum_vs, MATCH_VFREDUSUM_VS, MASK_VFREDUSUM_VS) +DECLARE_INSN(vfsub_vv, MATCH_VFSUB_VV, MASK_VFSUB_VV) +DECLARE_INSN(vfredosum_vs, MATCH_VFREDOSUM_VS, MASK_VFREDOSUM_VS) +DECLARE_INSN(vfmin_vv, MATCH_VFMIN_VV, MASK_VFMIN_VV) +DECLARE_INSN(vfredmin_vs, MATCH_VFREDMIN_VS, MASK_VFREDMIN_VS) +DECLARE_INSN(vfmax_vv, MATCH_VFMAX_VV, MASK_VFMAX_VV) +DECLARE_INSN(vfredmax_vs, MATCH_VFREDMAX_VS, MASK_VFREDMAX_VS) +DECLARE_INSN(vfsgnj_vv, MATCH_VFSGNJ_VV, MASK_VFSGNJ_VV) +DECLARE_INSN(vfsgnjn_vv, MATCH_VFSGNJN_VV, MASK_VFSGNJN_VV) +DECLARE_INSN(vfsgnjx_vv, MATCH_VFSGNJX_VV, MASK_VFSGNJX_VV) +DECLARE_INSN(vfmv_f_s, MATCH_VFMV_F_S, MASK_VFMV_F_S) +DECLARE_INSN(vmfeq_vv, MATCH_VMFEQ_VV, MASK_VMFEQ_VV) +DECLARE_INSN(vmfle_vv, MATCH_VMFLE_VV, MASK_VMFLE_VV) +DECLARE_INSN(vmflt_vv, MATCH_VMFLT_VV, MASK_VMFLT_VV) +DECLARE_INSN(vmfne_vv, MATCH_VMFNE_VV, MASK_VMFNE_VV) +DECLARE_INSN(vfdiv_vv, MATCH_VFDIV_VV, MASK_VFDIV_VV) +DECLARE_INSN(vfmul_vv, MATCH_VFMUL_VV, MASK_VFMUL_VV) +DECLARE_INSN(vfmadd_vv, MATCH_VFMADD_VV, MASK_VFMADD_VV) +DECLARE_INSN(vfnmadd_vv, MATCH_VFNMADD_VV, MASK_VFNMADD_VV) +DECLARE_INSN(vfmsub_vv, MATCH_VFMSUB_VV, MASK_VFMSUB_VV) +DECLARE_INSN(vfnmsub_vv, MATCH_VFNMSUB_VV, MASK_VFNMSUB_VV) +DECLARE_INSN(vfmacc_vv, MATCH_VFMACC_VV, MASK_VFMACC_VV) +DECLARE_INSN(vfnmacc_vv, MATCH_VFNMACC_VV, MASK_VFNMACC_VV) +DECLARE_INSN(vfmsac_vv, MATCH_VFMSAC_VV, MASK_VFMSAC_VV) +DECLARE_INSN(vfnmsac_vv, MATCH_VFNMSAC_VV, MASK_VFNMSAC_VV) +DECLARE_INSN(vfcvt_xu_f_v, MATCH_VFCVT_XU_F_V, MASK_VFCVT_XU_F_V) +DECLARE_INSN(vfcvt_x_f_v, MATCH_VFCVT_X_F_V, MASK_VFCVT_X_F_V) +DECLARE_INSN(vfcvt_f_xu_v, MATCH_VFCVT_F_XU_V, MASK_VFCVT_F_XU_V) +DECLARE_INSN(vfcvt_f_x_v, MATCH_VFCVT_F_X_V, MASK_VFCVT_F_X_V) +DECLARE_INSN(vfcvt_rtz_xu_f_v, MATCH_VFCVT_RTZ_XU_F_V, MASK_VFCVT_RTZ_XU_F_V) +DECLARE_INSN(vfcvt_rtz_x_f_v, MATCH_VFCVT_RTZ_X_F_V, MASK_VFCVT_RTZ_X_F_V) +DECLARE_INSN(vfwcvt_xu_f_v, MATCH_VFWCVT_XU_F_V, MASK_VFWCVT_XU_F_V) +DECLARE_INSN(vfwcvt_x_f_v, MATCH_VFWCVT_X_F_V, MASK_VFWCVT_X_F_V) +DECLARE_INSN(vfwcvt_f_xu_v, MATCH_VFWCVT_F_XU_V, MASK_VFWCVT_F_XU_V) +DECLARE_INSN(vfwcvt_f_x_v, MATCH_VFWCVT_F_X_V, MASK_VFWCVT_F_X_V) +DECLARE_INSN(vfwcvt_f_f_v, MATCH_VFWCVT_F_F_V, MASK_VFWCVT_F_F_V) +DECLARE_INSN(vfwcvt_rtz_xu_f_v, MATCH_VFWCVT_RTZ_XU_F_V, MASK_VFWCVT_RTZ_XU_F_V) +DECLARE_INSN(vfwcvt_rtz_x_f_v, MATCH_VFWCVT_RTZ_X_F_V, MASK_VFWCVT_RTZ_X_F_V) +DECLARE_INSN(vfncvt_xu_f_w, MATCH_VFNCVT_XU_F_W, MASK_VFNCVT_XU_F_W) +DECLARE_INSN(vfncvt_x_f_w, MATCH_VFNCVT_X_F_W, MASK_VFNCVT_X_F_W) +DECLARE_INSN(vfncvt_f_xu_w, MATCH_VFNCVT_F_XU_W, MASK_VFNCVT_F_XU_W) +DECLARE_INSN(vfncvt_f_x_w, MATCH_VFNCVT_F_X_W, MASK_VFNCVT_F_X_W) +DECLARE_INSN(vfncvt_f_f_w, MATCH_VFNCVT_F_F_W, MASK_VFNCVT_F_F_W) +DECLARE_INSN(vfncvt_rod_f_f_w, MATCH_VFNCVT_ROD_F_F_W, MASK_VFNCVT_ROD_F_F_W) +DECLARE_INSN(vfncvt_rtz_xu_f_w, MATCH_VFNCVT_RTZ_XU_F_W, MASK_VFNCVT_RTZ_XU_F_W) +DECLARE_INSN(vfncvt_rtz_x_f_w, MATCH_VFNCVT_RTZ_X_F_W, MASK_VFNCVT_RTZ_X_F_W) +DECLARE_INSN(vfsqrt_v, MATCH_VFSQRT_V, MASK_VFSQRT_V) +DECLARE_INSN(vfrsqrt7_v, MATCH_VFRSQRT7_V, MASK_VFRSQRT7_V) +DECLARE_INSN(vfrec7_v, MATCH_VFREC7_V, 
MASK_VFREC7_V) +DECLARE_INSN(vfclass_v, MATCH_VFCLASS_V, MASK_VFCLASS_V) +DECLARE_INSN(vfwadd_vv, MATCH_VFWADD_VV, MASK_VFWADD_VV) +DECLARE_INSN(vfwredusum_vs, MATCH_VFWREDUSUM_VS, MASK_VFWREDUSUM_VS) +DECLARE_INSN(vfwsub_vv, MATCH_VFWSUB_VV, MASK_VFWSUB_VV) +DECLARE_INSN(vfwredosum_vs, MATCH_VFWREDOSUM_VS, MASK_VFWREDOSUM_VS) +DECLARE_INSN(vfwadd_wv, MATCH_VFWADD_WV, MASK_VFWADD_WV) +DECLARE_INSN(vfwsub_wv, MATCH_VFWSUB_WV, MASK_VFWSUB_WV) +DECLARE_INSN(vfwmul_vv, MATCH_VFWMUL_VV, MASK_VFWMUL_VV) +DECLARE_INSN(vfwmacc_vv, MATCH_VFWMACC_VV, MASK_VFWMACC_VV) +DECLARE_INSN(vfwnmacc_vv, MATCH_VFWNMACC_VV, MASK_VFWNMACC_VV) +DECLARE_INSN(vfwmsac_vv, MATCH_VFWMSAC_VV, MASK_VFWMSAC_VV) +DECLARE_INSN(vfwnmsac_vv, MATCH_VFWNMSAC_VV, MASK_VFWNMSAC_VV) +DECLARE_INSN(vadd_vx, MATCH_VADD_VX, MASK_VADD_VX) +DECLARE_INSN(vsub_vx, MATCH_VSUB_VX, MASK_VSUB_VX) +DECLARE_INSN(vrsub_vx, MATCH_VRSUB_VX, MASK_VRSUB_VX) +DECLARE_INSN(vminu_vx, MATCH_VMINU_VX, MASK_VMINU_VX) +DECLARE_INSN(vmin_vx, MATCH_VMIN_VX, MASK_VMIN_VX) +DECLARE_INSN(vmaxu_vx, MATCH_VMAXU_VX, MASK_VMAXU_VX) +DECLARE_INSN(vmax_vx, MATCH_VMAX_VX, MASK_VMAX_VX) +DECLARE_INSN(vand_vx, MATCH_VAND_VX, MASK_VAND_VX) +DECLARE_INSN(vor_vx, MATCH_VOR_VX, MASK_VOR_VX) +DECLARE_INSN(vxor_vx, MATCH_VXOR_VX, MASK_VXOR_VX) +DECLARE_INSN(vrgather_vx, MATCH_VRGATHER_VX, MASK_VRGATHER_VX) +DECLARE_INSN(vslideup_vx, MATCH_VSLIDEUP_VX, MASK_VSLIDEUP_VX) +DECLARE_INSN(vslidedown_vx, MATCH_VSLIDEDOWN_VX, MASK_VSLIDEDOWN_VX) +DECLARE_INSN(vadc_vxm, MATCH_VADC_VXM, MASK_VADC_VXM) +DECLARE_INSN(vmadc_vxm, MATCH_VMADC_VXM, MASK_VMADC_VXM) +DECLARE_INSN(vmadc_vx, MATCH_VMADC_VX, MASK_VMADC_VX) +DECLARE_INSN(vsbc_vxm, MATCH_VSBC_VXM, MASK_VSBC_VXM) +DECLARE_INSN(vmsbc_vxm, MATCH_VMSBC_VXM, MASK_VMSBC_VXM) +DECLARE_INSN(vmsbc_vx, MATCH_VMSBC_VX, MASK_VMSBC_VX) +DECLARE_INSN(vmerge_vxm, MATCH_VMERGE_VXM, MASK_VMERGE_VXM) +DECLARE_INSN(vmv_v_x, MATCH_VMV_V_X, MASK_VMV_V_X) +DECLARE_INSN(vmseq_vx, MATCH_VMSEQ_VX, MASK_VMSEQ_VX) +DECLARE_INSN(vmsne_vx, MATCH_VMSNE_VX, MASK_VMSNE_VX) +DECLARE_INSN(vmsltu_vx, MATCH_VMSLTU_VX, MASK_VMSLTU_VX) +DECLARE_INSN(vmslt_vx, MATCH_VMSLT_VX, MASK_VMSLT_VX) +DECLARE_INSN(vmsleu_vx, MATCH_VMSLEU_VX, MASK_VMSLEU_VX) +DECLARE_INSN(vmsle_vx, MATCH_VMSLE_VX, MASK_VMSLE_VX) +DECLARE_INSN(vmsgtu_vx, MATCH_VMSGTU_VX, MASK_VMSGTU_VX) +DECLARE_INSN(vmsgt_vx, MATCH_VMSGT_VX, MASK_VMSGT_VX) +DECLARE_INSN(vsaddu_vx, MATCH_VSADDU_VX, MASK_VSADDU_VX) +DECLARE_INSN(vsadd_vx, MATCH_VSADD_VX, MASK_VSADD_VX) +DECLARE_INSN(vssubu_vx, MATCH_VSSUBU_VX, MASK_VSSUBU_VX) +DECLARE_INSN(vssub_vx, MATCH_VSSUB_VX, MASK_VSSUB_VX) +DECLARE_INSN(vsll_vx, MATCH_VSLL_VX, MASK_VSLL_VX) +DECLARE_INSN(vsmul_vx, MATCH_VSMUL_VX, MASK_VSMUL_VX) +DECLARE_INSN(vsrl_vx, MATCH_VSRL_VX, MASK_VSRL_VX) +DECLARE_INSN(vsra_vx, MATCH_VSRA_VX, MASK_VSRA_VX) +DECLARE_INSN(vssrl_vx, MATCH_VSSRL_VX, MASK_VSSRL_VX) +DECLARE_INSN(vssra_vx, MATCH_VSSRA_VX, MASK_VSSRA_VX) +DECLARE_INSN(vnsrl_wx, MATCH_VNSRL_WX, MASK_VNSRL_WX) +DECLARE_INSN(vnsra_wx, MATCH_VNSRA_WX, MASK_VNSRA_WX) +DECLARE_INSN(vnclipu_wx, MATCH_VNCLIPU_WX, MASK_VNCLIPU_WX) +DECLARE_INSN(vnclip_wx, MATCH_VNCLIP_WX, MASK_VNCLIP_WX) +DECLARE_INSN(vadd_vv, MATCH_VADD_VV, MASK_VADD_VV) +DECLARE_INSN(vsub_vv, MATCH_VSUB_VV, MASK_VSUB_VV) +DECLARE_INSN(vminu_vv, MATCH_VMINU_VV, MASK_VMINU_VV) +DECLARE_INSN(vmin_vv, MATCH_VMIN_VV, MASK_VMIN_VV) +DECLARE_INSN(vmaxu_vv, MATCH_VMAXU_VV, MASK_VMAXU_VV) +DECLARE_INSN(vmax_vv, MATCH_VMAX_VV, MASK_VMAX_VV) +DECLARE_INSN(vand_vv, MATCH_VAND_VV, MASK_VAND_VV) +DECLARE_INSN(vor_vv, 
MATCH_VOR_VV, MASK_VOR_VV) +DECLARE_INSN(vxor_vv, MATCH_VXOR_VV, MASK_VXOR_VV) +DECLARE_INSN(vrgather_vv, MATCH_VRGATHER_VV, MASK_VRGATHER_VV) +DECLARE_INSN(vrgatherei16_vv, MATCH_VRGATHEREI16_VV, MASK_VRGATHEREI16_VV) +DECLARE_INSN(vadc_vvm, MATCH_VADC_VVM, MASK_VADC_VVM) +DECLARE_INSN(vmadc_vvm, MATCH_VMADC_VVM, MASK_VMADC_VVM) +DECLARE_INSN(vmadc_vv, MATCH_VMADC_VV, MASK_VMADC_VV) +DECLARE_INSN(vsbc_vvm, MATCH_VSBC_VVM, MASK_VSBC_VVM) +DECLARE_INSN(vmsbc_vvm, MATCH_VMSBC_VVM, MASK_VMSBC_VVM) +DECLARE_INSN(vmsbc_vv, MATCH_VMSBC_VV, MASK_VMSBC_VV) +DECLARE_INSN(vmerge_vvm, MATCH_VMERGE_VVM, MASK_VMERGE_VVM) +DECLARE_INSN(vmv_v_v, MATCH_VMV_V_V, MASK_VMV_V_V) +DECLARE_INSN(vmseq_vv, MATCH_VMSEQ_VV, MASK_VMSEQ_VV) +DECLARE_INSN(vmsne_vv, MATCH_VMSNE_VV, MASK_VMSNE_VV) +DECLARE_INSN(vmsltu_vv, MATCH_VMSLTU_VV, MASK_VMSLTU_VV) +DECLARE_INSN(vmslt_vv, MATCH_VMSLT_VV, MASK_VMSLT_VV) +DECLARE_INSN(vmsleu_vv, MATCH_VMSLEU_VV, MASK_VMSLEU_VV) +DECLARE_INSN(vmsle_vv, MATCH_VMSLE_VV, MASK_VMSLE_VV) +DECLARE_INSN(vsaddu_vv, MATCH_VSADDU_VV, MASK_VSADDU_VV) +DECLARE_INSN(vsadd_vv, MATCH_VSADD_VV, MASK_VSADD_VV) +DECLARE_INSN(vssubu_vv, MATCH_VSSUBU_VV, MASK_VSSUBU_VV) +DECLARE_INSN(vssub_vv, MATCH_VSSUB_VV, MASK_VSSUB_VV) +DECLARE_INSN(vsll_vv, MATCH_VSLL_VV, MASK_VSLL_VV) +DECLARE_INSN(vsmul_vv, MATCH_VSMUL_VV, MASK_VSMUL_VV) +DECLARE_INSN(vsrl_vv, MATCH_VSRL_VV, MASK_VSRL_VV) +DECLARE_INSN(vsra_vv, MATCH_VSRA_VV, MASK_VSRA_VV) +DECLARE_INSN(vssrl_vv, MATCH_VSSRL_VV, MASK_VSSRL_VV) +DECLARE_INSN(vssra_vv, MATCH_VSSRA_VV, MASK_VSSRA_VV) +DECLARE_INSN(vnsrl_wv, MATCH_VNSRL_WV, MASK_VNSRL_WV) +DECLARE_INSN(vnsra_wv, MATCH_VNSRA_WV, MASK_VNSRA_WV) +DECLARE_INSN(vnclipu_wv, MATCH_VNCLIPU_WV, MASK_VNCLIPU_WV) +DECLARE_INSN(vnclip_wv, MATCH_VNCLIP_WV, MASK_VNCLIP_WV) +DECLARE_INSN(vwredsumu_vs, MATCH_VWREDSUMU_VS, MASK_VWREDSUMU_VS) +DECLARE_INSN(vwredsum_vs, MATCH_VWREDSUM_VS, MASK_VWREDSUM_VS) +DECLARE_INSN(vadd_vi, MATCH_VADD_VI, MASK_VADD_VI) +DECLARE_INSN(vrsub_vi, MATCH_VRSUB_VI, MASK_VRSUB_VI) +DECLARE_INSN(vand_vi, MATCH_VAND_VI, MASK_VAND_VI) +DECLARE_INSN(vor_vi, MATCH_VOR_VI, MASK_VOR_VI) +DECLARE_INSN(vxor_vi, MATCH_VXOR_VI, MASK_VXOR_VI) +DECLARE_INSN(vrgather_vi, MATCH_VRGATHER_VI, MASK_VRGATHER_VI) +DECLARE_INSN(vslideup_vi, MATCH_VSLIDEUP_VI, MASK_VSLIDEUP_VI) +DECLARE_INSN(vslidedown_vi, MATCH_VSLIDEDOWN_VI, MASK_VSLIDEDOWN_VI) +DECLARE_INSN(vadc_vim, MATCH_VADC_VIM, MASK_VADC_VIM) +DECLARE_INSN(vmadc_vim, MATCH_VMADC_VIM, MASK_VMADC_VIM) +DECLARE_INSN(vmadc_vi, MATCH_VMADC_VI, MASK_VMADC_VI) +DECLARE_INSN(vmerge_vim, MATCH_VMERGE_VIM, MASK_VMERGE_VIM) +DECLARE_INSN(vmv_v_i, MATCH_VMV_V_I, MASK_VMV_V_I) +DECLARE_INSN(vmseq_vi, MATCH_VMSEQ_VI, MASK_VMSEQ_VI) +DECLARE_INSN(vmsne_vi, MATCH_VMSNE_VI, MASK_VMSNE_VI) +DECLARE_INSN(vmsleu_vi, MATCH_VMSLEU_VI, MASK_VMSLEU_VI) +DECLARE_INSN(vmsle_vi, MATCH_VMSLE_VI, MASK_VMSLE_VI) +DECLARE_INSN(vmsgtu_vi, MATCH_VMSGTU_VI, MASK_VMSGTU_VI) +DECLARE_INSN(vmsgt_vi, MATCH_VMSGT_VI, MASK_VMSGT_VI) +DECLARE_INSN(vsaddu_vi, MATCH_VSADDU_VI, MASK_VSADDU_VI) +DECLARE_INSN(vsadd_vi, MATCH_VSADD_VI, MASK_VSADD_VI) +DECLARE_INSN(vsll_vi, MATCH_VSLL_VI, MASK_VSLL_VI) +DECLARE_INSN(vmv1r_v, MATCH_VMV1R_V, MASK_VMV1R_V) +DECLARE_INSN(vmv2r_v, MATCH_VMV2R_V, MASK_VMV2R_V) +DECLARE_INSN(vmv4r_v, MATCH_VMV4R_V, MASK_VMV4R_V) +DECLARE_INSN(vmv8r_v, MATCH_VMV8R_V, MASK_VMV8R_V) +DECLARE_INSN(vsrl_vi, MATCH_VSRL_VI, MASK_VSRL_VI) +DECLARE_INSN(vsra_vi, MATCH_VSRA_VI, MASK_VSRA_VI) +DECLARE_INSN(vssrl_vi, MATCH_VSSRL_VI, MASK_VSSRL_VI) +DECLARE_INSN(vssra_vi, 
MATCH_VSSRA_VI, MASK_VSSRA_VI) +DECLARE_INSN(vnsrl_wi, MATCH_VNSRL_WI, MASK_VNSRL_WI) +DECLARE_INSN(vnsra_wi, MATCH_VNSRA_WI, MASK_VNSRA_WI) +DECLARE_INSN(vnclipu_wi, MATCH_VNCLIPU_WI, MASK_VNCLIPU_WI) +DECLARE_INSN(vnclip_wi, MATCH_VNCLIP_WI, MASK_VNCLIP_WI) +DECLARE_INSN(vredsum_vs, MATCH_VREDSUM_VS, MASK_VREDSUM_VS) +DECLARE_INSN(vredand_vs, MATCH_VREDAND_VS, MASK_VREDAND_VS) +DECLARE_INSN(vredor_vs, MATCH_VREDOR_VS, MASK_VREDOR_VS) +DECLARE_INSN(vredxor_vs, MATCH_VREDXOR_VS, MASK_VREDXOR_VS) +DECLARE_INSN(vredminu_vs, MATCH_VREDMINU_VS, MASK_VREDMINU_VS) +DECLARE_INSN(vredmin_vs, MATCH_VREDMIN_VS, MASK_VREDMIN_VS) +DECLARE_INSN(vredmaxu_vs, MATCH_VREDMAXU_VS, MASK_VREDMAXU_VS) +DECLARE_INSN(vredmax_vs, MATCH_VREDMAX_VS, MASK_VREDMAX_VS) +DECLARE_INSN(vaaddu_vv, MATCH_VAADDU_VV, MASK_VAADDU_VV) +DECLARE_INSN(vaadd_vv, MATCH_VAADD_VV, MASK_VAADD_VV) +DECLARE_INSN(vasubu_vv, MATCH_VASUBU_VV, MASK_VASUBU_VV) +DECLARE_INSN(vasub_vv, MATCH_VASUB_VV, MASK_VASUB_VV) +DECLARE_INSN(vmv_x_s, MATCH_VMV_X_S, MASK_VMV_X_S) +DECLARE_INSN(vzext_vf8, MATCH_VZEXT_VF8, MASK_VZEXT_VF8) +DECLARE_INSN(vsext_vf8, MATCH_VSEXT_VF8, MASK_VSEXT_VF8) +DECLARE_INSN(vzext_vf4, MATCH_VZEXT_VF4, MASK_VZEXT_VF4) +DECLARE_INSN(vsext_vf4, MATCH_VSEXT_VF4, MASK_VSEXT_VF4) +DECLARE_INSN(vzext_vf2, MATCH_VZEXT_VF2, MASK_VZEXT_VF2) +DECLARE_INSN(vsext_vf2, MATCH_VSEXT_VF2, MASK_VSEXT_VF2) +DECLARE_INSN(vcompress_vm, MATCH_VCOMPRESS_VM, MASK_VCOMPRESS_VM) +DECLARE_INSN(vmandn_mm, MATCH_VMANDN_MM, MASK_VMANDN_MM) +DECLARE_INSN(vmand_mm, MATCH_VMAND_MM, MASK_VMAND_MM) +DECLARE_INSN(vmor_mm, MATCH_VMOR_MM, MASK_VMOR_MM) +DECLARE_INSN(vmxor_mm, MATCH_VMXOR_MM, MASK_VMXOR_MM) +DECLARE_INSN(vmorn_mm, MATCH_VMORN_MM, MASK_VMORN_MM) +DECLARE_INSN(vmnand_mm, MATCH_VMNAND_MM, MASK_VMNAND_MM) +DECLARE_INSN(vmnor_mm, MATCH_VMNOR_MM, MASK_VMNOR_MM) +DECLARE_INSN(vmxnor_mm, MATCH_VMXNOR_MM, MASK_VMXNOR_MM) +DECLARE_INSN(vmsbf_m, MATCH_VMSBF_M, MASK_VMSBF_M) +DECLARE_INSN(vmsof_m, MATCH_VMSOF_M, MASK_VMSOF_M) +DECLARE_INSN(vmsif_m, MATCH_VMSIF_M, MASK_VMSIF_M) +DECLARE_INSN(viota_m, MATCH_VIOTA_M, MASK_VIOTA_M) +DECLARE_INSN(vid_v, MATCH_VID_V, MASK_VID_V) +DECLARE_INSN(vcpop_m, MATCH_VCPOP_M, MASK_VCPOP_M) +DECLARE_INSN(vfirst_m, MATCH_VFIRST_M, MASK_VFIRST_M) +DECLARE_INSN(vdivu_vv, MATCH_VDIVU_VV, MASK_VDIVU_VV) +DECLARE_INSN(vdiv_vv, MATCH_VDIV_VV, MASK_VDIV_VV) +DECLARE_INSN(vremu_vv, MATCH_VREMU_VV, MASK_VREMU_VV) +DECLARE_INSN(vrem_vv, MATCH_VREM_VV, MASK_VREM_VV) +DECLARE_INSN(vmulhu_vv, MATCH_VMULHU_VV, MASK_VMULHU_VV) +DECLARE_INSN(vmul_vv, MATCH_VMUL_VV, MASK_VMUL_VV) +DECLARE_INSN(vmulhsu_vv, MATCH_VMULHSU_VV, MASK_VMULHSU_VV) +DECLARE_INSN(vmulh_vv, MATCH_VMULH_VV, MASK_VMULH_VV) +DECLARE_INSN(vmadd_vv, MATCH_VMADD_VV, MASK_VMADD_VV) +DECLARE_INSN(vnmsub_vv, MATCH_VNMSUB_VV, MASK_VNMSUB_VV) +DECLARE_INSN(vmacc_vv, MATCH_VMACC_VV, MASK_VMACC_VV) +DECLARE_INSN(vnmsac_vv, MATCH_VNMSAC_VV, MASK_VNMSAC_VV) +DECLARE_INSN(vwaddu_vv, MATCH_VWADDU_VV, MASK_VWADDU_VV) +DECLARE_INSN(vwadd_vv, MATCH_VWADD_VV, MASK_VWADD_VV) +DECLARE_INSN(vwsubu_vv, MATCH_VWSUBU_VV, MASK_VWSUBU_VV) +DECLARE_INSN(vwsub_vv, MATCH_VWSUB_VV, MASK_VWSUB_VV) +DECLARE_INSN(vwaddu_wv, MATCH_VWADDU_WV, MASK_VWADDU_WV) +DECLARE_INSN(vwadd_wv, MATCH_VWADD_WV, MASK_VWADD_WV) +DECLARE_INSN(vwsubu_wv, MATCH_VWSUBU_WV, MASK_VWSUBU_WV) +DECLARE_INSN(vwsub_wv, MATCH_VWSUB_WV, MASK_VWSUB_WV) +DECLARE_INSN(vwmulu_vv, MATCH_VWMULU_VV, MASK_VWMULU_VV) +DECLARE_INSN(vwmulsu_vv, MATCH_VWMULSU_VV, MASK_VWMULSU_VV) +DECLARE_INSN(vwmul_vv, MATCH_VWMUL_VV, MASK_VWMUL_VV) 
+DECLARE_INSN(vwmaccu_vv, MATCH_VWMACCU_VV, MASK_VWMACCU_VV) +DECLARE_INSN(vwmacc_vv, MATCH_VWMACC_VV, MASK_VWMACC_VV) +DECLARE_INSN(vwmaccsu_vv, MATCH_VWMACCSU_VV, MASK_VWMACCSU_VV) +DECLARE_INSN(vaaddu_vx, MATCH_VAADDU_VX, MASK_VAADDU_VX) +DECLARE_INSN(vaadd_vx, MATCH_VAADD_VX, MASK_VAADD_VX) +DECLARE_INSN(vasubu_vx, MATCH_VASUBU_VX, MASK_VASUBU_VX) +DECLARE_INSN(vasub_vx, MATCH_VASUB_VX, MASK_VASUB_VX) +DECLARE_INSN(vmv_s_x, MATCH_VMV_S_X, MASK_VMV_S_X) +DECLARE_INSN(vslide1up_vx, MATCH_VSLIDE1UP_VX, MASK_VSLIDE1UP_VX) +DECLARE_INSN(vslide1down_vx, MATCH_VSLIDE1DOWN_VX, MASK_VSLIDE1DOWN_VX) +DECLARE_INSN(vdivu_vx, MATCH_VDIVU_VX, MASK_VDIVU_VX) +DECLARE_INSN(vdiv_vx, MATCH_VDIV_VX, MASK_VDIV_VX) +DECLARE_INSN(vremu_vx, MATCH_VREMU_VX, MASK_VREMU_VX) +DECLARE_INSN(vrem_vx, MATCH_VREM_VX, MASK_VREM_VX) +DECLARE_INSN(vmulhu_vx, MATCH_VMULHU_VX, MASK_VMULHU_VX) +DECLARE_INSN(vmul_vx, MATCH_VMUL_VX, MASK_VMUL_VX) +DECLARE_INSN(vmulhsu_vx, MATCH_VMULHSU_VX, MASK_VMULHSU_VX) +DECLARE_INSN(vmulh_vx, MATCH_VMULH_VX, MASK_VMULH_VX) +DECLARE_INSN(vmadd_vx, MATCH_VMADD_VX, MASK_VMADD_VX) +DECLARE_INSN(vnmsub_vx, MATCH_VNMSUB_VX, MASK_VNMSUB_VX) +DECLARE_INSN(vmacc_vx, MATCH_VMACC_VX, MASK_VMACC_VX) +DECLARE_INSN(vnmsac_vx, MATCH_VNMSAC_VX, MASK_VNMSAC_VX) +DECLARE_INSN(vwaddu_vx, MATCH_VWADDU_VX, MASK_VWADDU_VX) +DECLARE_INSN(vwadd_vx, MATCH_VWADD_VX, MASK_VWADD_VX) +DECLARE_INSN(vwsubu_vx, MATCH_VWSUBU_VX, MASK_VWSUBU_VX) +DECLARE_INSN(vwsub_vx, MATCH_VWSUB_VX, MASK_VWSUB_VX) +DECLARE_INSN(vwaddu_wx, MATCH_VWADDU_WX, MASK_VWADDU_WX) +DECLARE_INSN(vwadd_wx, MATCH_VWADD_WX, MASK_VWADD_WX) +DECLARE_INSN(vwsubu_wx, MATCH_VWSUBU_WX, MASK_VWSUBU_WX) +DECLARE_INSN(vwsub_wx, MATCH_VWSUB_WX, MASK_VWSUB_WX) +DECLARE_INSN(vwmulu_vx, MATCH_VWMULU_VX, MASK_VWMULU_VX) +DECLARE_INSN(vwmulsu_vx, MATCH_VWMULSU_VX, MASK_VWMULSU_VX) +DECLARE_INSN(vwmul_vx, MATCH_VWMUL_VX, MASK_VWMUL_VX) +DECLARE_INSN(vwmaccu_vx, MATCH_VWMACCU_VX, MASK_VWMACCU_VX) +DECLARE_INSN(vwmacc_vx, MATCH_VWMACC_VX, MASK_VWMACC_VX) +DECLARE_INSN(vwmaccus_vx, MATCH_VWMACCUS_VX, MASK_VWMACCUS_VX) +DECLARE_INSN(vwmaccsu_vx, MATCH_VWMACCSU_VX, MASK_VWMACCSU_VX) +DECLARE_INSN(vamoswapei8_v, MATCH_VAMOSWAPEI8_V, MASK_VAMOSWAPEI8_V) +DECLARE_INSN(vamoaddei8_v, MATCH_VAMOADDEI8_V, MASK_VAMOADDEI8_V) +DECLARE_INSN(vamoxorei8_v, MATCH_VAMOXOREI8_V, MASK_VAMOXOREI8_V) +DECLARE_INSN(vamoandei8_v, MATCH_VAMOANDEI8_V, MASK_VAMOANDEI8_V) +DECLARE_INSN(vamoorei8_v, MATCH_VAMOOREI8_V, MASK_VAMOOREI8_V) +DECLARE_INSN(vamominei8_v, MATCH_VAMOMINEI8_V, MASK_VAMOMINEI8_V) +DECLARE_INSN(vamomaxei8_v, MATCH_VAMOMAXEI8_V, MASK_VAMOMAXEI8_V) +DECLARE_INSN(vamominuei8_v, MATCH_VAMOMINUEI8_V, MASK_VAMOMINUEI8_V) +DECLARE_INSN(vamomaxuei8_v, MATCH_VAMOMAXUEI8_V, MASK_VAMOMAXUEI8_V) +DECLARE_INSN(vamoswapei16_v, MATCH_VAMOSWAPEI16_V, MASK_VAMOSWAPEI16_V) +DECLARE_INSN(vamoaddei16_v, MATCH_VAMOADDEI16_V, MASK_VAMOADDEI16_V) +DECLARE_INSN(vamoxorei16_v, MATCH_VAMOXOREI16_V, MASK_VAMOXOREI16_V) +DECLARE_INSN(vamoandei16_v, MATCH_VAMOANDEI16_V, MASK_VAMOANDEI16_V) +DECLARE_INSN(vamoorei16_v, MATCH_VAMOOREI16_V, MASK_VAMOOREI16_V) +DECLARE_INSN(vamominei16_v, MATCH_VAMOMINEI16_V, MASK_VAMOMINEI16_V) +DECLARE_INSN(vamomaxei16_v, MATCH_VAMOMAXEI16_V, MASK_VAMOMAXEI16_V) +DECLARE_INSN(vamominuei16_v, MATCH_VAMOMINUEI16_V, MASK_VAMOMINUEI16_V) +DECLARE_INSN(vamomaxuei16_v, MATCH_VAMOMAXUEI16_V, MASK_VAMOMAXUEI16_V) +DECLARE_INSN(vamoswapei32_v, MATCH_VAMOSWAPEI32_V, MASK_VAMOSWAPEI32_V) +DECLARE_INSN(vamoaddei32_v, MATCH_VAMOADDEI32_V, MASK_VAMOADDEI32_V) 
+DECLARE_INSN(vamoxorei32_v, MATCH_VAMOXOREI32_V, MASK_VAMOXOREI32_V) +DECLARE_INSN(vamoandei32_v, MATCH_VAMOANDEI32_V, MASK_VAMOANDEI32_V) +DECLARE_INSN(vamoorei32_v, MATCH_VAMOOREI32_V, MASK_VAMOOREI32_V) +DECLARE_INSN(vamominei32_v, MATCH_VAMOMINEI32_V, MASK_VAMOMINEI32_V) +DECLARE_INSN(vamomaxei32_v, MATCH_VAMOMAXEI32_V, MASK_VAMOMAXEI32_V) +DECLARE_INSN(vamominuei32_v, MATCH_VAMOMINUEI32_V, MASK_VAMOMINUEI32_V) +DECLARE_INSN(vamomaxuei32_v, MATCH_VAMOMAXUEI32_V, MASK_VAMOMAXUEI32_V) +DECLARE_INSN(vamoswapei64_v, MATCH_VAMOSWAPEI64_V, MASK_VAMOSWAPEI64_V) +DECLARE_INSN(vamoaddei64_v, MATCH_VAMOADDEI64_V, MASK_VAMOADDEI64_V) +DECLARE_INSN(vamoxorei64_v, MATCH_VAMOXOREI64_V, MASK_VAMOXOREI64_V) +DECLARE_INSN(vamoandei64_v, MATCH_VAMOANDEI64_V, MASK_VAMOANDEI64_V) +DECLARE_INSN(vamoorei64_v, MATCH_VAMOOREI64_V, MASK_VAMOOREI64_V) +DECLARE_INSN(vamominei64_v, MATCH_VAMOMINEI64_V, MASK_VAMOMINEI64_V) +DECLARE_INSN(vamomaxei64_v, MATCH_VAMOMAXEI64_V, MASK_VAMOMAXEI64_V) +DECLARE_INSN(vamominuei64_v, MATCH_VAMOMINUEI64_V, MASK_VAMOMINUEI64_V) +DECLARE_INSN(vamomaxuei64_v, MATCH_VAMOMAXUEI64_V, MASK_VAMOMAXUEI64_V) +DECLARE_INSN(add8, MATCH_ADD8, MASK_ADD8) +DECLARE_INSN(add16, MATCH_ADD16, MASK_ADD16) +DECLARE_INSN(add64, MATCH_ADD64, MASK_ADD64) +DECLARE_INSN(ave, MATCH_AVE, MASK_AVE) +DECLARE_INSN(bitrev, MATCH_BITREV, MASK_BITREV) +DECLARE_INSN(bitrevi, MATCH_BITREVI, MASK_BITREVI) +DECLARE_INSN(bpick, MATCH_BPICK, MASK_BPICK) +DECLARE_INSN(clrs8, MATCH_CLRS8, MASK_CLRS8) +DECLARE_INSN(clrs16, MATCH_CLRS16, MASK_CLRS16) +DECLARE_INSN(clrs32, MATCH_CLRS32, MASK_CLRS32) +DECLARE_INSN(clo8, MATCH_CLO8, MASK_CLO8) +DECLARE_INSN(clo16, MATCH_CLO16, MASK_CLO16) +DECLARE_INSN(clo32, MATCH_CLO32, MASK_CLO32) +DECLARE_INSN(clz8, MATCH_CLZ8, MASK_CLZ8) +DECLARE_INSN(clz16, MATCH_CLZ16, MASK_CLZ16) +DECLARE_INSN(clz32, MATCH_CLZ32, MASK_CLZ32) +DECLARE_INSN(cmpeq8, MATCH_CMPEQ8, MASK_CMPEQ8) +DECLARE_INSN(cmpeq16, MATCH_CMPEQ16, MASK_CMPEQ16) +DECLARE_INSN(cras16, MATCH_CRAS16, MASK_CRAS16) +DECLARE_INSN(crsa16, MATCH_CRSA16, MASK_CRSA16) +DECLARE_INSN(insb, MATCH_INSB, MASK_INSB) +DECLARE_INSN(kabs8, MATCH_KABS8, MASK_KABS8) +DECLARE_INSN(kabs16, MATCH_KABS16, MASK_KABS16) +DECLARE_INSN(kabsw, MATCH_KABSW, MASK_KABSW) +DECLARE_INSN(kadd8, MATCH_KADD8, MASK_KADD8) +DECLARE_INSN(kadd16, MATCH_KADD16, MASK_KADD16) +DECLARE_INSN(kadd64, MATCH_KADD64, MASK_KADD64) +DECLARE_INSN(kaddh, MATCH_KADDH, MASK_KADDH) +DECLARE_INSN(kaddw, MATCH_KADDW, MASK_KADDW) +DECLARE_INSN(kcras16, MATCH_KCRAS16, MASK_KCRAS16) +DECLARE_INSN(kcrsa16, MATCH_KCRSA16, MASK_KCRSA16) +DECLARE_INSN(kdmbb, MATCH_KDMBB, MASK_KDMBB) +DECLARE_INSN(kdmbt, MATCH_KDMBT, MASK_KDMBT) +DECLARE_INSN(kdmtt, MATCH_KDMTT, MASK_KDMTT) +DECLARE_INSN(kdmabb, MATCH_KDMABB, MASK_KDMABB) +DECLARE_INSN(kdmabt, MATCH_KDMABT, MASK_KDMABT) +DECLARE_INSN(kdmatt, MATCH_KDMATT, MASK_KDMATT) +DECLARE_INSN(khm8, MATCH_KHM8, MASK_KHM8) +DECLARE_INSN(khmx8, MATCH_KHMX8, MASK_KHMX8) +DECLARE_INSN(khm16, MATCH_KHM16, MASK_KHM16) +DECLARE_INSN(khmx16, MATCH_KHMX16, MASK_KHMX16) +DECLARE_INSN(khmbb, MATCH_KHMBB, MASK_KHMBB) +DECLARE_INSN(khmbt, MATCH_KHMBT, MASK_KHMBT) +DECLARE_INSN(khmtt, MATCH_KHMTT, MASK_KHMTT) +DECLARE_INSN(kmabb, MATCH_KMABB, MASK_KMABB) +DECLARE_INSN(kmabt, MATCH_KMABT, MASK_KMABT) +DECLARE_INSN(kmatt, MATCH_KMATT, MASK_KMATT) +DECLARE_INSN(kmada, MATCH_KMADA, MASK_KMADA) +DECLARE_INSN(kmaxda, MATCH_KMAXDA, MASK_KMAXDA) +DECLARE_INSN(kmads, MATCH_KMADS, MASK_KMADS) +DECLARE_INSN(kmadrs, MATCH_KMADRS, MASK_KMADRS) +DECLARE_INSN(kmaxds, 
MATCH_KMAXDS, MASK_KMAXDS) +DECLARE_INSN(kmar64, MATCH_KMAR64, MASK_KMAR64) +DECLARE_INSN(kmda, MATCH_KMDA, MASK_KMDA) +DECLARE_INSN(kmxda, MATCH_KMXDA, MASK_KMXDA) +DECLARE_INSN(kmmac, MATCH_KMMAC, MASK_KMMAC) +DECLARE_INSN(kmmac_u, MATCH_KMMAC_U, MASK_KMMAC_U) +DECLARE_INSN(kmmawb, MATCH_KMMAWB, MASK_KMMAWB) +DECLARE_INSN(kmmawb_u, MATCH_KMMAWB_U, MASK_KMMAWB_U) +DECLARE_INSN(kmmawb2, MATCH_KMMAWB2, MASK_KMMAWB2) +DECLARE_INSN(kmmawb2_u, MATCH_KMMAWB2_U, MASK_KMMAWB2_U) +DECLARE_INSN(kmmawt, MATCH_KMMAWT, MASK_KMMAWT) +DECLARE_INSN(kmmawt_u, MATCH_KMMAWT_U, MASK_KMMAWT_U) +DECLARE_INSN(kmmawt2, MATCH_KMMAWT2, MASK_KMMAWT2) +DECLARE_INSN(kmmawt2_u, MATCH_KMMAWT2_U, MASK_KMMAWT2_U) +DECLARE_INSN(kmmsb, MATCH_KMMSB, MASK_KMMSB) +DECLARE_INSN(kmmsb_u, MATCH_KMMSB_U, MASK_KMMSB_U) +DECLARE_INSN(kmmwb2, MATCH_KMMWB2, MASK_KMMWB2) +DECLARE_INSN(kmmwb2_u, MATCH_KMMWB2_U, MASK_KMMWB2_U) +DECLARE_INSN(kmmwt2, MATCH_KMMWT2, MASK_KMMWT2) +DECLARE_INSN(kmmwt2_u, MATCH_KMMWT2_U, MASK_KMMWT2_U) +DECLARE_INSN(kmsda, MATCH_KMSDA, MASK_KMSDA) +DECLARE_INSN(kmsxda, MATCH_KMSXDA, MASK_KMSXDA) +DECLARE_INSN(kmsr64, MATCH_KMSR64, MASK_KMSR64) +DECLARE_INSN(ksllw, MATCH_KSLLW, MASK_KSLLW) +DECLARE_INSN(kslliw, MATCH_KSLLIW, MASK_KSLLIW) +DECLARE_INSN(ksll8, MATCH_KSLL8, MASK_KSLL8) +DECLARE_INSN(kslli8, MATCH_KSLLI8, MASK_KSLLI8) +DECLARE_INSN(ksll16, MATCH_KSLL16, MASK_KSLL16) +DECLARE_INSN(kslli16, MATCH_KSLLI16, MASK_KSLLI16) +DECLARE_INSN(kslra8, MATCH_KSLRA8, MASK_KSLRA8) +DECLARE_INSN(kslra8_u, MATCH_KSLRA8_U, MASK_KSLRA8_U) +DECLARE_INSN(kslra16, MATCH_KSLRA16, MASK_KSLRA16) +DECLARE_INSN(kslra16_u, MATCH_KSLRA16_U, MASK_KSLRA16_U) +DECLARE_INSN(kslraw, MATCH_KSLRAW, MASK_KSLRAW) +DECLARE_INSN(kslraw_u, MATCH_KSLRAW_U, MASK_KSLRAW_U) +DECLARE_INSN(kstas16, MATCH_KSTAS16, MASK_KSTAS16) +DECLARE_INSN(kstsa16, MATCH_KSTSA16, MASK_KSTSA16) +DECLARE_INSN(ksub8, MATCH_KSUB8, MASK_KSUB8) +DECLARE_INSN(ksub16, MATCH_KSUB16, MASK_KSUB16) +DECLARE_INSN(ksub64, MATCH_KSUB64, MASK_KSUB64) +DECLARE_INSN(ksubh, MATCH_KSUBH, MASK_KSUBH) +DECLARE_INSN(ksubw, MATCH_KSUBW, MASK_KSUBW) +DECLARE_INSN(kwmmul, MATCH_KWMMUL, MASK_KWMMUL) +DECLARE_INSN(kwmmul_u, MATCH_KWMMUL_U, MASK_KWMMUL_U) +DECLARE_INSN(maddr32, MATCH_MADDR32, MASK_MADDR32) +DECLARE_INSN(maxw, MATCH_MAXW, MASK_MAXW) +DECLARE_INSN(minw, MATCH_MINW, MASK_MINW) +DECLARE_INSN(msubr32, MATCH_MSUBR32, MASK_MSUBR32) +DECLARE_INSN(mulr64, MATCH_MULR64, MASK_MULR64) +DECLARE_INSN(mulsr64, MATCH_MULSR64, MASK_MULSR64) +DECLARE_INSN(pbsad, MATCH_PBSAD, MASK_PBSAD) +DECLARE_INSN(pbsada, MATCH_PBSADA, MASK_PBSADA) +DECLARE_INSN(pkbb16, MATCH_PKBB16, MASK_PKBB16) +DECLARE_INSN(pkbt16, MATCH_PKBT16, MASK_PKBT16) +DECLARE_INSN(pktt16, MATCH_PKTT16, MASK_PKTT16) +DECLARE_INSN(pktb16, MATCH_PKTB16, MASK_PKTB16) +DECLARE_INSN(radd8, MATCH_RADD8, MASK_RADD8) +DECLARE_INSN(radd16, MATCH_RADD16, MASK_RADD16) +DECLARE_INSN(radd64, MATCH_RADD64, MASK_RADD64) +DECLARE_INSN(raddw, MATCH_RADDW, MASK_RADDW) +DECLARE_INSN(rcras16, MATCH_RCRAS16, MASK_RCRAS16) +DECLARE_INSN(rcrsa16, MATCH_RCRSA16, MASK_RCRSA16) +DECLARE_INSN(rstas16, MATCH_RSTAS16, MASK_RSTAS16) +DECLARE_INSN(rstsa16, MATCH_RSTSA16, MASK_RSTSA16) +DECLARE_INSN(rsub8, MATCH_RSUB8, MASK_RSUB8) +DECLARE_INSN(rsub16, MATCH_RSUB16, MASK_RSUB16) +DECLARE_INSN(rsub64, MATCH_RSUB64, MASK_RSUB64) +DECLARE_INSN(rsubw, MATCH_RSUBW, MASK_RSUBW) +DECLARE_INSN(sclip8, MATCH_SCLIP8, MASK_SCLIP8) +DECLARE_INSN(sclip16, MATCH_SCLIP16, MASK_SCLIP16) +DECLARE_INSN(sclip32, MATCH_SCLIP32, MASK_SCLIP32) +DECLARE_INSN(scmple8, MATCH_SCMPLE8, 
MASK_SCMPLE8) +DECLARE_INSN(scmple16, MATCH_SCMPLE16, MASK_SCMPLE16) +DECLARE_INSN(scmplt8, MATCH_SCMPLT8, MASK_SCMPLT8) +DECLARE_INSN(scmplt16, MATCH_SCMPLT16, MASK_SCMPLT16) +DECLARE_INSN(sll8, MATCH_SLL8, MASK_SLL8) +DECLARE_INSN(slli8, MATCH_SLLI8, MASK_SLLI8) +DECLARE_INSN(sll16, MATCH_SLL16, MASK_SLL16) +DECLARE_INSN(slli16, MATCH_SLLI16, MASK_SLLI16) +DECLARE_INSN(smal, MATCH_SMAL, MASK_SMAL) +DECLARE_INSN(smalbb, MATCH_SMALBB, MASK_SMALBB) +DECLARE_INSN(smalbt, MATCH_SMALBT, MASK_SMALBT) +DECLARE_INSN(smaltt, MATCH_SMALTT, MASK_SMALTT) +DECLARE_INSN(smalda, MATCH_SMALDA, MASK_SMALDA) +DECLARE_INSN(smalxda, MATCH_SMALXDA, MASK_SMALXDA) +DECLARE_INSN(smalds, MATCH_SMALDS, MASK_SMALDS) +DECLARE_INSN(smaldrs, MATCH_SMALDRS, MASK_SMALDRS) +DECLARE_INSN(smalxds, MATCH_SMALXDS, MASK_SMALXDS) +DECLARE_INSN(smar64, MATCH_SMAR64, MASK_SMAR64) +DECLARE_INSN(smaqa, MATCH_SMAQA, MASK_SMAQA) +DECLARE_INSN(smaqa_su, MATCH_SMAQA_SU, MASK_SMAQA_SU) +DECLARE_INSN(smax8, MATCH_SMAX8, MASK_SMAX8) +DECLARE_INSN(smax16, MATCH_SMAX16, MASK_SMAX16) +DECLARE_INSN(smbb16, MATCH_SMBB16, MASK_SMBB16) +DECLARE_INSN(smbt16, MATCH_SMBT16, MASK_SMBT16) +DECLARE_INSN(smtt16, MATCH_SMTT16, MASK_SMTT16) +DECLARE_INSN(smds, MATCH_SMDS, MASK_SMDS) +DECLARE_INSN(smdrs, MATCH_SMDRS, MASK_SMDRS) +DECLARE_INSN(smxds, MATCH_SMXDS, MASK_SMXDS) +DECLARE_INSN(smin8, MATCH_SMIN8, MASK_SMIN8) +DECLARE_INSN(smin16, MATCH_SMIN16, MASK_SMIN16) +DECLARE_INSN(smmul, MATCH_SMMUL, MASK_SMMUL) +DECLARE_INSN(smmul_u, MATCH_SMMUL_U, MASK_SMMUL_U) +DECLARE_INSN(smmwb, MATCH_SMMWB, MASK_SMMWB) +DECLARE_INSN(smmwb_u, MATCH_SMMWB_U, MASK_SMMWB_U) +DECLARE_INSN(smmwt, MATCH_SMMWT, MASK_SMMWT) +DECLARE_INSN(smmwt_u, MATCH_SMMWT_U, MASK_SMMWT_U) +DECLARE_INSN(smslda, MATCH_SMSLDA, MASK_SMSLDA) +DECLARE_INSN(smslxda, MATCH_SMSLXDA, MASK_SMSLXDA) +DECLARE_INSN(smsr64, MATCH_SMSR64, MASK_SMSR64) +DECLARE_INSN(smul8, MATCH_SMUL8, MASK_SMUL8) +DECLARE_INSN(smulx8, MATCH_SMULX8, MASK_SMULX8) +DECLARE_INSN(smul16, MATCH_SMUL16, MASK_SMUL16) +DECLARE_INSN(smulx16, MATCH_SMULX16, MASK_SMULX16) +DECLARE_INSN(sra_u, MATCH_SRA_U, MASK_SRA_U) +DECLARE_INSN(srai_u, MATCH_SRAI_U, MASK_SRAI_U) +DECLARE_INSN(sra8, MATCH_SRA8, MASK_SRA8) +DECLARE_INSN(sra8_u, MATCH_SRA8_U, MASK_SRA8_U) +DECLARE_INSN(srai8, MATCH_SRAI8, MASK_SRAI8) +DECLARE_INSN(srai8_u, MATCH_SRAI8_U, MASK_SRAI8_U) +DECLARE_INSN(sra16, MATCH_SRA16, MASK_SRA16) +DECLARE_INSN(sra16_u, MATCH_SRA16_U, MASK_SRA16_U) +DECLARE_INSN(srai16, MATCH_SRAI16, MASK_SRAI16) +DECLARE_INSN(srai16_u, MATCH_SRAI16_U, MASK_SRAI16_U) +DECLARE_INSN(srl8, MATCH_SRL8, MASK_SRL8) +DECLARE_INSN(srl8_u, MATCH_SRL8_U, MASK_SRL8_U) +DECLARE_INSN(srli8, MATCH_SRLI8, MASK_SRLI8) +DECLARE_INSN(srli8_u, MATCH_SRLI8_U, MASK_SRLI8_U) +DECLARE_INSN(srl16, MATCH_SRL16, MASK_SRL16) +DECLARE_INSN(srl16_u, MATCH_SRL16_U, MASK_SRL16_U) +DECLARE_INSN(srli16, MATCH_SRLI16, MASK_SRLI16) +DECLARE_INSN(srli16_u, MATCH_SRLI16_U, MASK_SRLI16_U) +DECLARE_INSN(stas16, MATCH_STAS16, MASK_STAS16) +DECLARE_INSN(stsa16, MATCH_STSA16, MASK_STSA16) +DECLARE_INSN(sub8, MATCH_SUB8, MASK_SUB8) +DECLARE_INSN(sub16, MATCH_SUB16, MASK_SUB16) +DECLARE_INSN(sub64, MATCH_SUB64, MASK_SUB64) +DECLARE_INSN(sunpkd810, MATCH_SUNPKD810, MASK_SUNPKD810) +DECLARE_INSN(sunpkd820, MATCH_SUNPKD820, MASK_SUNPKD820) +DECLARE_INSN(sunpkd830, MATCH_SUNPKD830, MASK_SUNPKD830) +DECLARE_INSN(sunpkd831, MATCH_SUNPKD831, MASK_SUNPKD831) +DECLARE_INSN(sunpkd832, MATCH_SUNPKD832, MASK_SUNPKD832) +DECLARE_INSN(swap8, MATCH_SWAP8, MASK_SWAP8) +DECLARE_INSN(uclip8, MATCH_UCLIP8, 
MASK_UCLIP8) +DECLARE_INSN(uclip16, MATCH_UCLIP16, MASK_UCLIP16) +DECLARE_INSN(uclip32, MATCH_UCLIP32, MASK_UCLIP32) +DECLARE_INSN(ucmple8, MATCH_UCMPLE8, MASK_UCMPLE8) +DECLARE_INSN(ucmple16, MATCH_UCMPLE16, MASK_UCMPLE16) +DECLARE_INSN(ucmplt8, MATCH_UCMPLT8, MASK_UCMPLT8) +DECLARE_INSN(ucmplt16, MATCH_UCMPLT16, MASK_UCMPLT16) +DECLARE_INSN(ukadd8, MATCH_UKADD8, MASK_UKADD8) +DECLARE_INSN(ukadd16, MATCH_UKADD16, MASK_UKADD16) +DECLARE_INSN(ukadd64, MATCH_UKADD64, MASK_UKADD64) +DECLARE_INSN(ukaddh, MATCH_UKADDH, MASK_UKADDH) +DECLARE_INSN(ukaddw, MATCH_UKADDW, MASK_UKADDW) +DECLARE_INSN(ukcras16, MATCH_UKCRAS16, MASK_UKCRAS16) +DECLARE_INSN(ukcrsa16, MATCH_UKCRSA16, MASK_UKCRSA16) +DECLARE_INSN(ukmar64, MATCH_UKMAR64, MASK_UKMAR64) +DECLARE_INSN(ukmsr64, MATCH_UKMSR64, MASK_UKMSR64) +DECLARE_INSN(ukstas16, MATCH_UKSTAS16, MASK_UKSTAS16) +DECLARE_INSN(ukstsa16, MATCH_UKSTSA16, MASK_UKSTSA16) +DECLARE_INSN(uksub8, MATCH_UKSUB8, MASK_UKSUB8) +DECLARE_INSN(uksub16, MATCH_UKSUB16, MASK_UKSUB16) +DECLARE_INSN(uksub64, MATCH_UKSUB64, MASK_UKSUB64) +DECLARE_INSN(uksubh, MATCH_UKSUBH, MASK_UKSUBH) +DECLARE_INSN(uksubw, MATCH_UKSUBW, MASK_UKSUBW) +DECLARE_INSN(umar64, MATCH_UMAR64, MASK_UMAR64) +DECLARE_INSN(umaqa, MATCH_UMAQA, MASK_UMAQA) +DECLARE_INSN(umax8, MATCH_UMAX8, MASK_UMAX8) +DECLARE_INSN(umax16, MATCH_UMAX16, MASK_UMAX16) +DECLARE_INSN(umin8, MATCH_UMIN8, MASK_UMIN8) +DECLARE_INSN(umin16, MATCH_UMIN16, MASK_UMIN16) +DECLARE_INSN(umsr64, MATCH_UMSR64, MASK_UMSR64) +DECLARE_INSN(umul8, MATCH_UMUL8, MASK_UMUL8) +DECLARE_INSN(umulx8, MATCH_UMULX8, MASK_UMULX8) +DECLARE_INSN(umul16, MATCH_UMUL16, MASK_UMUL16) +DECLARE_INSN(umulx16, MATCH_UMULX16, MASK_UMULX16) +DECLARE_INSN(uradd8, MATCH_URADD8, MASK_URADD8) +DECLARE_INSN(uradd16, MATCH_URADD16, MASK_URADD16) +DECLARE_INSN(uradd64, MATCH_URADD64, MASK_URADD64) +DECLARE_INSN(uraddw, MATCH_URADDW, MASK_URADDW) +DECLARE_INSN(urcras16, MATCH_URCRAS16, MASK_URCRAS16) +DECLARE_INSN(urcrsa16, MATCH_URCRSA16, MASK_URCRSA16) +DECLARE_INSN(urstas16, MATCH_URSTAS16, MASK_URSTAS16) +DECLARE_INSN(urstsa16, MATCH_URSTSA16, MASK_URSTSA16) +DECLARE_INSN(ursub8, MATCH_URSUB8, MASK_URSUB8) +DECLARE_INSN(ursub16, MATCH_URSUB16, MASK_URSUB16) +DECLARE_INSN(ursub64, MATCH_URSUB64, MASK_URSUB64) +DECLARE_INSN(ursubw, MATCH_URSUBW, MASK_URSUBW) +DECLARE_INSN(wexti, MATCH_WEXTI, MASK_WEXTI) +DECLARE_INSN(wext, MATCH_WEXT, MASK_WEXT) +DECLARE_INSN(zunpkd810, MATCH_ZUNPKD810, MASK_ZUNPKD810) +DECLARE_INSN(zunpkd820, MATCH_ZUNPKD820, MASK_ZUNPKD820) +DECLARE_INSN(zunpkd830, MATCH_ZUNPKD830, MASK_ZUNPKD830) +DECLARE_INSN(zunpkd831, MATCH_ZUNPKD831, MASK_ZUNPKD831) +DECLARE_INSN(zunpkd832, MATCH_ZUNPKD832, MASK_ZUNPKD832) +DECLARE_INSN(add32, MATCH_ADD32, MASK_ADD32) +DECLARE_INSN(cras32, MATCH_CRAS32, MASK_CRAS32) +DECLARE_INSN(crsa32, MATCH_CRSA32, MASK_CRSA32) +DECLARE_INSN(kabs32, MATCH_KABS32, MASK_KABS32) +DECLARE_INSN(kadd32, MATCH_KADD32, MASK_KADD32) +DECLARE_INSN(kcras32, MATCH_KCRAS32, MASK_KCRAS32) +DECLARE_INSN(kcrsa32, MATCH_KCRSA32, MASK_KCRSA32) +DECLARE_INSN(kdmbb16, MATCH_KDMBB16, MASK_KDMBB16) +DECLARE_INSN(kdmbt16, MATCH_KDMBT16, MASK_KDMBT16) +DECLARE_INSN(kdmtt16, MATCH_KDMTT16, MASK_KDMTT16) +DECLARE_INSN(kdmabb16, MATCH_KDMABB16, MASK_KDMABB16) +DECLARE_INSN(kdmabt16, MATCH_KDMABT16, MASK_KDMABT16) +DECLARE_INSN(kdmatt16, MATCH_KDMATT16, MASK_KDMATT16) +DECLARE_INSN(khmbb16, MATCH_KHMBB16, MASK_KHMBB16) +DECLARE_INSN(khmbt16, MATCH_KHMBT16, MASK_KHMBT16) +DECLARE_INSN(khmtt16, MATCH_KHMTT16, MASK_KHMTT16) +DECLARE_INSN(kmabb32, MATCH_KMABB32, 
MASK_KMABB32) +DECLARE_INSN(kmabt32, MATCH_KMABT32, MASK_KMABT32) +DECLARE_INSN(kmatt32, MATCH_KMATT32, MASK_KMATT32) +DECLARE_INSN(kmaxda32, MATCH_KMAXDA32, MASK_KMAXDA32) +DECLARE_INSN(kmda32, MATCH_KMDA32, MASK_KMDA32) +DECLARE_INSN(kmxda32, MATCH_KMXDA32, MASK_KMXDA32) +DECLARE_INSN(kmads32, MATCH_KMADS32, MASK_KMADS32) +DECLARE_INSN(kmadrs32, MATCH_KMADRS32, MASK_KMADRS32) +DECLARE_INSN(kmaxds32, MATCH_KMAXDS32, MASK_KMAXDS32) +DECLARE_INSN(kmsda32, MATCH_KMSDA32, MASK_KMSDA32) +DECLARE_INSN(kmsxda32, MATCH_KMSXDA32, MASK_KMSXDA32) +DECLARE_INSN(ksll32, MATCH_KSLL32, MASK_KSLL32) +DECLARE_INSN(kslli32, MATCH_KSLLI32, MASK_KSLLI32) +DECLARE_INSN(kslra32, MATCH_KSLRA32, MASK_KSLRA32) +DECLARE_INSN(kslra32_u, MATCH_KSLRA32_U, MASK_KSLRA32_U) +DECLARE_INSN(kstas32, MATCH_KSTAS32, MASK_KSTAS32) +DECLARE_INSN(kstsa32, MATCH_KSTSA32, MASK_KSTSA32) +DECLARE_INSN(ksub32, MATCH_KSUB32, MASK_KSUB32) +DECLARE_INSN(pkbb32, MATCH_PKBB32, MASK_PKBB32) +DECLARE_INSN(pkbt32, MATCH_PKBT32, MASK_PKBT32) +DECLARE_INSN(pktt32, MATCH_PKTT32, MASK_PKTT32) +DECLARE_INSN(pktb32, MATCH_PKTB32, MASK_PKTB32) +DECLARE_INSN(radd32, MATCH_RADD32, MASK_RADD32) +DECLARE_INSN(rcras32, MATCH_RCRAS32, MASK_RCRAS32) +DECLARE_INSN(rcrsa32, MATCH_RCRSA32, MASK_RCRSA32) +DECLARE_INSN(rstas32, MATCH_RSTAS32, MASK_RSTAS32) +DECLARE_INSN(rstsa32, MATCH_RSTSA32, MASK_RSTSA32) +DECLARE_INSN(rsub32, MATCH_RSUB32, MASK_RSUB32) +DECLARE_INSN(sll32, MATCH_SLL32, MASK_SLL32) +DECLARE_INSN(slli32, MATCH_SLLI32, MASK_SLLI32) +DECLARE_INSN(smax32, MATCH_SMAX32, MASK_SMAX32) +DECLARE_INSN(smbt32, MATCH_SMBT32, MASK_SMBT32) +DECLARE_INSN(smtt32, MATCH_SMTT32, MASK_SMTT32) +DECLARE_INSN(smds32, MATCH_SMDS32, MASK_SMDS32) +DECLARE_INSN(smdrs32, MATCH_SMDRS32, MASK_SMDRS32) +DECLARE_INSN(smxds32, MATCH_SMXDS32, MASK_SMXDS32) +DECLARE_INSN(smin32, MATCH_SMIN32, MASK_SMIN32) +DECLARE_INSN(sra32, MATCH_SRA32, MASK_SRA32) +DECLARE_INSN(sra32_u, MATCH_SRA32_U, MASK_SRA32_U) +DECLARE_INSN(srai32, MATCH_SRAI32, MASK_SRAI32) +DECLARE_INSN(srai32_u, MATCH_SRAI32_U, MASK_SRAI32_U) +DECLARE_INSN(sraiw_u, MATCH_SRAIW_U, MASK_SRAIW_U) +DECLARE_INSN(srl32, MATCH_SRL32, MASK_SRL32) +DECLARE_INSN(srl32_u, MATCH_SRL32_U, MASK_SRL32_U) +DECLARE_INSN(srli32, MATCH_SRLI32, MASK_SRLI32) +DECLARE_INSN(srli32_u, MATCH_SRLI32_U, MASK_SRLI32_U) +DECLARE_INSN(stas32, MATCH_STAS32, MASK_STAS32) +DECLARE_INSN(stsa32, MATCH_STSA32, MASK_STSA32) +DECLARE_INSN(sub32, MATCH_SUB32, MASK_SUB32) +DECLARE_INSN(ukadd32, MATCH_UKADD32, MASK_UKADD32) +DECLARE_INSN(ukcras32, MATCH_UKCRAS32, MASK_UKCRAS32) +DECLARE_INSN(ukcrsa32, MATCH_UKCRSA32, MASK_UKCRSA32) +DECLARE_INSN(ukstas32, MATCH_UKSTAS32, MASK_UKSTAS32) +DECLARE_INSN(ukstsa32, MATCH_UKSTSA32, MASK_UKSTSA32) +DECLARE_INSN(uksub32, MATCH_UKSUB32, MASK_UKSUB32) +DECLARE_INSN(umax32, MATCH_UMAX32, MASK_UMAX32) +DECLARE_INSN(umin32, MATCH_UMIN32, MASK_UMIN32) +DECLARE_INSN(uradd32, MATCH_URADD32, MASK_URADD32) +DECLARE_INSN(urcras32, MATCH_URCRAS32, MASK_URCRAS32) +DECLARE_INSN(urcrsa32, MATCH_URCRSA32, MASK_URCRSA32) +DECLARE_INSN(urstas32, MATCH_URSTAS32, MASK_URSTAS32) +DECLARE_INSN(urstsa32, MATCH_URSTSA32, MASK_URSTSA32) +DECLARE_INSN(ursub32, MATCH_URSUB32, MASK_URSUB32) +DECLARE_INSN(vmvnfr_v, MATCH_VMVNFR_V, MASK_VMVNFR_V) +DECLARE_INSN(vl1r_v, MATCH_VL1R_V, MASK_VL1R_V) +DECLARE_INSN(vl2r_v, MATCH_VL2R_V, MASK_VL2R_V) +DECLARE_INSN(vl4r_v, MATCH_VL4R_V, MASK_VL4R_V) +DECLARE_INSN(vl8r_v, MATCH_VL8R_V, MASK_VL8R_V) +DECLARE_INSN(vle1_v, MATCH_VLE1_V, MASK_VLE1_V) +DECLARE_INSN(vse1_v, MATCH_VSE1_V, MASK_VSE1_V) 
+DECLARE_INSN(vfredsum_vs, MATCH_VFREDSUM_VS, MASK_VFREDSUM_VS) +DECLARE_INSN(vfwredsum_vs, MATCH_VFWREDSUM_VS, MASK_VFWREDSUM_VS) +DECLARE_INSN(vpopc_m, MATCH_VPOPC_M, MASK_VPOPC_M) +DECLARE_INSN(vmornot_mm, MATCH_VMORNOT_MM, MASK_VMORNOT_MM) +DECLARE_INSN(vmandnot_mm, MATCH_VMANDNOT_MM, MASK_VMANDNOT_MM) +#endif +#ifdef DECLARE_CSR +DECLARE_CSR(fflags, CSR_FFLAGS) +DECLARE_CSR(frm, CSR_FRM) +DECLARE_CSR(fcsr, CSR_FCSR) +DECLARE_CSR(vstart, CSR_VSTART) +DECLARE_CSR(vxsat, CSR_VXSAT) +DECLARE_CSR(vxrm, CSR_VXRM) +DECLARE_CSR(vcsr, CSR_VCSR) +DECLARE_CSR(seed, CSR_SEED) +DECLARE_CSR(cycle, CSR_CYCLE) +DECLARE_CSR(time, CSR_TIME) +DECLARE_CSR(instret, CSR_INSTRET) +DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3) +DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4) +DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5) +DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6) +DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7) +DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8) +DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9) +DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10) +DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11) +DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12) +DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13) +DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14) +DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15) +DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16) +DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17) +DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18) +DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19) +DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20) +DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21) +DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22) +DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23) +DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24) +DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25) +DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26) +DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27) +DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28) +DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29) +DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30) +DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31) +DECLARE_CSR(vl, CSR_VL) +DECLARE_CSR(vtype, CSR_VTYPE) +DECLARE_CSR(vlenb, CSR_VLENB) +DECLARE_CSR(sstatus, CSR_SSTATUS) +DECLARE_CSR(sedeleg, CSR_SEDELEG) +DECLARE_CSR(sideleg, CSR_SIDELEG) +DECLARE_CSR(sie, CSR_SIE) +DECLARE_CSR(stvec, CSR_STVEC) +DECLARE_CSR(scounteren, CSR_SCOUNTEREN) +DECLARE_CSR(senvcfg, CSR_SENVCFG) +DECLARE_CSR(sscratch, CSR_SSCRATCH) +DECLARE_CSR(sepc, CSR_SEPC) +DECLARE_CSR(scause, CSR_SCAUSE) +DECLARE_CSR(stval, CSR_STVAL) +DECLARE_CSR(sip, CSR_SIP) +DECLARE_CSR(satp, CSR_SATP) +DECLARE_CSR(scontext, CSR_SCONTEXT) +DECLARE_CSR(vsstatus, CSR_VSSTATUS) +DECLARE_CSR(vsie, CSR_VSIE) +DECLARE_CSR(vstvec, CSR_VSTVEC) +DECLARE_CSR(vsscratch, CSR_VSSCRATCH) +DECLARE_CSR(vsepc, CSR_VSEPC) +DECLARE_CSR(vscause, CSR_VSCAUSE) +DECLARE_CSR(vstval, CSR_VSTVAL) +DECLARE_CSR(vsip, CSR_VSIP) +DECLARE_CSR(vsatp, CSR_VSATP) +DECLARE_CSR(hstatus, CSR_HSTATUS) +DECLARE_CSR(hedeleg, CSR_HEDELEG) +DECLARE_CSR(hideleg, CSR_HIDELEG) +DECLARE_CSR(hie, CSR_HIE) +DECLARE_CSR(htimedelta, CSR_HTIMEDELTA) +DECLARE_CSR(hcounteren, CSR_HCOUNTEREN) +DECLARE_CSR(hgeie, CSR_HGEIE) +DECLARE_CSR(henvcfg, CSR_HENVCFG) +DECLARE_CSR(htval, CSR_HTVAL) +DECLARE_CSR(hip, CSR_HIP) +DECLARE_CSR(hvip, CSR_HVIP) +DECLARE_CSR(htinst, CSR_HTINST) +DECLARE_CSR(hgatp, CSR_HGATP) +DECLARE_CSR(hcontext, CSR_HCONTEXT) +DECLARE_CSR(hgeip, CSR_HGEIP) +DECLARE_CSR(utvt, CSR_UTVT) +DECLARE_CSR(unxti, CSR_UNXTI) +DECLARE_CSR(uintstatus, CSR_UINTSTATUS) +DECLARE_CSR(uscratchcsw, CSR_USCRATCHCSW) +DECLARE_CSR(uscratchcswl, CSR_USCRATCHCSWL) +DECLARE_CSR(stvt, CSR_STVT) +DECLARE_CSR(snxti, 
CSR_SNXTI) +DECLARE_CSR(sintstatus, CSR_SINTSTATUS) +DECLARE_CSR(sscratchcsw, CSR_SSCRATCHCSW) +DECLARE_CSR(sscratchcswl, CSR_SSCRATCHCSWL) +DECLARE_CSR(mtvt, CSR_MTVT) +DECLARE_CSR(mnxti, CSR_MNXTI) +DECLARE_CSR(mintstatus, CSR_MINTSTATUS) +DECLARE_CSR(mscratchcsw, CSR_MSCRATCHCSW) +DECLARE_CSR(mscratchcswl, CSR_MSCRATCHCSWL) +DECLARE_CSR(mstatus, CSR_MSTATUS) +DECLARE_CSR(misa, CSR_MISA) +DECLARE_CSR(medeleg, CSR_MEDELEG) +DECLARE_CSR(mideleg, CSR_MIDELEG) +DECLARE_CSR(mie, CSR_MIE) +DECLARE_CSR(mtvec, CSR_MTVEC) +DECLARE_CSR(mcounteren, CSR_MCOUNTEREN) +DECLARE_CSR(menvcfg, CSR_MENVCFG) +DECLARE_CSR(mcountinhibit, CSR_MCOUNTINHIBIT) +DECLARE_CSR(mscratch, CSR_MSCRATCH) +DECLARE_CSR(mepc, CSR_MEPC) +DECLARE_CSR(mcause, CSR_MCAUSE) +DECLARE_CSR(mtval, CSR_MTVAL) +DECLARE_CSR(mip, CSR_MIP) +DECLARE_CSR(mtinst, CSR_MTINST) +DECLARE_CSR(mtval2, CSR_MTVAL2) +DECLARE_CSR(pmpcfg0, CSR_PMPCFG0) +DECLARE_CSR(pmpcfg1, CSR_PMPCFG1) +DECLARE_CSR(pmpcfg2, CSR_PMPCFG2) +DECLARE_CSR(pmpcfg3, CSR_PMPCFG3) +DECLARE_CSR(pmpcfg4, CSR_PMPCFG4) +DECLARE_CSR(pmpcfg5, CSR_PMPCFG5) +DECLARE_CSR(pmpcfg6, CSR_PMPCFG6) +DECLARE_CSR(pmpcfg7, CSR_PMPCFG7) +DECLARE_CSR(pmpcfg8, CSR_PMPCFG8) +DECLARE_CSR(pmpcfg9, CSR_PMPCFG9) +DECLARE_CSR(pmpcfg10, CSR_PMPCFG10) +DECLARE_CSR(pmpcfg11, CSR_PMPCFG11) +DECLARE_CSR(pmpcfg12, CSR_PMPCFG12) +DECLARE_CSR(pmpcfg13, CSR_PMPCFG13) +DECLARE_CSR(pmpcfg14, CSR_PMPCFG14) +DECLARE_CSR(pmpcfg15, CSR_PMPCFG15) +DECLARE_CSR(pmpaddr0, CSR_PMPADDR0) +DECLARE_CSR(pmpaddr1, CSR_PMPADDR1) +DECLARE_CSR(pmpaddr2, CSR_PMPADDR2) +DECLARE_CSR(pmpaddr3, CSR_PMPADDR3) +DECLARE_CSR(pmpaddr4, CSR_PMPADDR4) +DECLARE_CSR(pmpaddr5, CSR_PMPADDR5) +DECLARE_CSR(pmpaddr6, CSR_PMPADDR6) +DECLARE_CSR(pmpaddr7, CSR_PMPADDR7) +DECLARE_CSR(pmpaddr8, CSR_PMPADDR8) +DECLARE_CSR(pmpaddr9, CSR_PMPADDR9) +DECLARE_CSR(pmpaddr10, CSR_PMPADDR10) +DECLARE_CSR(pmpaddr11, CSR_PMPADDR11) +DECLARE_CSR(pmpaddr12, CSR_PMPADDR12) +DECLARE_CSR(pmpaddr13, CSR_PMPADDR13) +DECLARE_CSR(pmpaddr14, CSR_PMPADDR14) +DECLARE_CSR(pmpaddr15, CSR_PMPADDR15) +DECLARE_CSR(pmpaddr16, CSR_PMPADDR16) +DECLARE_CSR(pmpaddr17, CSR_PMPADDR17) +DECLARE_CSR(pmpaddr18, CSR_PMPADDR18) +DECLARE_CSR(pmpaddr19, CSR_PMPADDR19) +DECLARE_CSR(pmpaddr20, CSR_PMPADDR20) +DECLARE_CSR(pmpaddr21, CSR_PMPADDR21) +DECLARE_CSR(pmpaddr22, CSR_PMPADDR22) +DECLARE_CSR(pmpaddr23, CSR_PMPADDR23) +DECLARE_CSR(pmpaddr24, CSR_PMPADDR24) +DECLARE_CSR(pmpaddr25, CSR_PMPADDR25) +DECLARE_CSR(pmpaddr26, CSR_PMPADDR26) +DECLARE_CSR(pmpaddr27, CSR_PMPADDR27) +DECLARE_CSR(pmpaddr28, CSR_PMPADDR28) +DECLARE_CSR(pmpaddr29, CSR_PMPADDR29) +DECLARE_CSR(pmpaddr30, CSR_PMPADDR30) +DECLARE_CSR(pmpaddr31, CSR_PMPADDR31) +DECLARE_CSR(pmpaddr32, CSR_PMPADDR32) +DECLARE_CSR(pmpaddr33, CSR_PMPADDR33) +DECLARE_CSR(pmpaddr34, CSR_PMPADDR34) +DECLARE_CSR(pmpaddr35, CSR_PMPADDR35) +DECLARE_CSR(pmpaddr36, CSR_PMPADDR36) +DECLARE_CSR(pmpaddr37, CSR_PMPADDR37) +DECLARE_CSR(pmpaddr38, CSR_PMPADDR38) +DECLARE_CSR(pmpaddr39, CSR_PMPADDR39) +DECLARE_CSR(pmpaddr40, CSR_PMPADDR40) +DECLARE_CSR(pmpaddr41, CSR_PMPADDR41) +DECLARE_CSR(pmpaddr42, CSR_PMPADDR42) +DECLARE_CSR(pmpaddr43, CSR_PMPADDR43) +DECLARE_CSR(pmpaddr44, CSR_PMPADDR44) +DECLARE_CSR(pmpaddr45, CSR_PMPADDR45) +DECLARE_CSR(pmpaddr46, CSR_PMPADDR46) +DECLARE_CSR(pmpaddr47, CSR_PMPADDR47) +DECLARE_CSR(pmpaddr48, CSR_PMPADDR48) +DECLARE_CSR(pmpaddr49, CSR_PMPADDR49) +DECLARE_CSR(pmpaddr50, CSR_PMPADDR50) +DECLARE_CSR(pmpaddr51, CSR_PMPADDR51) +DECLARE_CSR(pmpaddr52, CSR_PMPADDR52) +DECLARE_CSR(pmpaddr53, CSR_PMPADDR53) +DECLARE_CSR(pmpaddr54, 
CSR_PMPADDR54) +DECLARE_CSR(pmpaddr55, CSR_PMPADDR55) +DECLARE_CSR(pmpaddr56, CSR_PMPADDR56) +DECLARE_CSR(pmpaddr57, CSR_PMPADDR57) +DECLARE_CSR(pmpaddr58, CSR_PMPADDR58) +DECLARE_CSR(pmpaddr59, CSR_PMPADDR59) +DECLARE_CSR(pmpaddr60, CSR_PMPADDR60) +DECLARE_CSR(pmpaddr61, CSR_PMPADDR61) +DECLARE_CSR(pmpaddr62, CSR_PMPADDR62) +DECLARE_CSR(pmpaddr63, CSR_PMPADDR63) +DECLARE_CSR(mseccfg, CSR_MSECCFG) +DECLARE_CSR(tselect, CSR_TSELECT) +DECLARE_CSR(tdata1, CSR_TDATA1) +DECLARE_CSR(tdata2, CSR_TDATA2) +DECLARE_CSR(tdata3, CSR_TDATA3) +DECLARE_CSR(tinfo, CSR_TINFO) +DECLARE_CSR(tcontrol, CSR_TCONTROL) +DECLARE_CSR(mcontext, CSR_MCONTEXT) +DECLARE_CSR(mscontext, CSR_MSCONTEXT) +DECLARE_CSR(dcsr, CSR_DCSR) +DECLARE_CSR(dpc, CSR_DPC) +DECLARE_CSR(dscratch0, CSR_DSCRATCH0) +DECLARE_CSR(dscratch1, CSR_DSCRATCH1) +DECLARE_CSR(mcycle, CSR_MCYCLE) +DECLARE_CSR(minstret, CSR_MINSTRET) +DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3) +DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4) +DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5) +DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6) +DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7) +DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8) +DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9) +DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10) +DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11) +DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12) +DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13) +DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14) +DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15) +DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16) +DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17) +DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18) +DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19) +DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20) +DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21) +DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22) +DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23) +DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24) +DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25) +DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26) +DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27) +DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28) +DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29) +DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30) +DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31) +DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3) +DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4) +DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5) +DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6) +DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7) +DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8) +DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9) +DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10) +DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11) +DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12) +DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13) +DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14) +DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15) +DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16) +DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17) +DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18) +DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19) +DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20) +DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21) +DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22) +DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23) +DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24) +DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25) +DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26) +DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27) +DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28) +DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29) +DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30) +DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31) +DECLARE_CSR(mvendorid, CSR_MVENDORID) +DECLARE_CSR(marchid, CSR_MARCHID) 
+DECLARE_CSR(mimpid, CSR_MIMPID) +DECLARE_CSR(mhartid, CSR_MHARTID) +DECLARE_CSR(mconfigptr, CSR_MCONFIGPTR) +DECLARE_CSR(htimedeltah, CSR_HTIMEDELTAH) +DECLARE_CSR(henvcfgh, CSR_HENVCFGH) +DECLARE_CSR(cycleh, CSR_CYCLEH) +DECLARE_CSR(timeh, CSR_TIMEH) +DECLARE_CSR(instreth, CSR_INSTRETH) +DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H) +DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H) +DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H) +DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H) +DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H) +DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H) +DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H) +DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H) +DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H) +DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H) +DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H) +DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H) +DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H) +DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H) +DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H) +DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H) +DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H) +DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H) +DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H) +DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H) +DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H) +DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H) +DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H) +DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H) +DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H) +DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H) +DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H) +DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H) +DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H) +DECLARE_CSR(mstatush, CSR_MSTATUSH) +DECLARE_CSR(menvcfgh, CSR_MENVCFGH) +DECLARE_CSR(mseccfgh, CSR_MSECCFGH) +DECLARE_CSR(mcycleh, CSR_MCYCLEH) +DECLARE_CSR(minstreth, CSR_MINSTRETH) +DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H) +DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H) +DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H) +DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H) +DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H) +DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H) +DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H) +DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H) +DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H) +DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H) +DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H) +DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H) +DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H) +DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H) +DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H) +DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H) +DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H) +DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H) +DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H) +DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H) +DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H) +DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H) +DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H) +DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H) +DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H) +DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H) +DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H) +DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H) +DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H) +#endif +#ifdef DECLARE_CAUSE +DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH) +DECLARE_CAUSE("fetch access", CAUSE_FETCH_ACCESS) +DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION) +DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT) +DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD) 
+DECLARE_CAUSE("load access", CAUSE_LOAD_ACCESS) +DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE) +DECLARE_CAUSE("store access", CAUSE_STORE_ACCESS) +DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL) +DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL) +DECLARE_CAUSE("virtual_supervisor_ecall", CAUSE_VIRTUAL_SUPERVISOR_ECALL) +DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL) +DECLARE_CAUSE("fetch page fault", CAUSE_FETCH_PAGE_FAULT) +DECLARE_CAUSE("load page fault", CAUSE_LOAD_PAGE_FAULT) +DECLARE_CAUSE("store page fault", CAUSE_STORE_PAGE_FAULT) +DECLARE_CAUSE("fetch guest page fault", CAUSE_FETCH_GUEST_PAGE_FAULT) +DECLARE_CAUSE("load guest page fault", CAUSE_LOAD_GUEST_PAGE_FAULT) +DECLARE_CAUSE("virtual instruction", CAUSE_VIRTUAL_INSTRUCTION) +DECLARE_CAUSE("store guest page fault", CAUSE_STORE_GUEST_PAGE_FAULT) +#endif diff --git a/vendor/riscv-isa-sim/riscv/entropy_source.h b/vendor/riscv-isa-sim/riscv/entropy_source.h new file mode 100644 index 00000000..47823ff7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/entropy_source.h @@ -0,0 +1,119 @@ + +#include +#include + +#include "internals.h" + +// +// Used to model the cryptography extension entropy source. +// See Section 4 of the Scalar Cryptography Extension specificaiton. +class entropy_source { + +public: + + // Valid return codes for OPST bits [31:30] when reading seed. + static const uint32_t OPST_BIST = 0x0 << 30; + static const uint32_t OPST_WAIT = 0x1 << 30; + static const uint32_t OPST_ES16 = 0x2 << 30; + static const uint32_t OPST_DEAD = 0x3 << 30; + + // + // Other system events + // ------------------------------------------------------------ + + void reset() { + // Does nothing for now. In the future, can be used to model things + // like initial BIST states. + } + + // + // seed register + // ------------------------------------------------------------ + + void set_seed(reg_t val) { + // Always ignore writes to seed. + // This CSR is strictly read only. It occupies a RW CSR address + // to handle the side-effect of the changing seed value on a read. + } + + + // + // The format of seed is described in Section 4.1 of + // the scalar cryptography specification. + reg_t get_seed() { + + uint32_t result = 0; + + // Currently, always return ES16 (i.e. good randomness) In the future, we + // can more realistically model things like WAIT states, BIST warm up and + // maybe scriptable entry into the DEAD state, but until then, this is + // the bare minimum. + uint32_t return_status = OPST_ES16; + + if(return_status == OPST_ES16) { + + // Add some sampled entropy into the low 16 bits + uint16_t entropy = this -> get_two_random_bytes(); + result |= entropy; + + } else if(return_status == OPST_BIST) { + + // Do nothing. + + } else if(return_status == OPST_WAIT) { + + // Do nothing. + + } else if(return_status == OPST_DEAD) { + + // Do nothing. Stay dead. + + } else { + + // Unreachable. + + } + + // Note that if we get here any return_status is anything other than + // OPST_ES16, then the low 16-bits of the return value must be zero. + + result |= return_status; + + // Result is zero-extended on RV64. + return (reg_t)result; + } + + // + // Utility / support variables and functions. + // ------------------------------------------------------------ + + // The file to read entropy from. + std::string randomness_source = "/dev/urandom"; + + // Read two random bytes from the entropy source file. 
+ uint16_t get_two_random_bytes() { + + std::ifstream fh(this -> randomness_source, std::ios::binary); + + if(fh.is_open()) { + + uint16_t random_bytes; + + fh.read((char*)(&random_bytes), 2); + + fh.close(); + + return random_bytes; + + } else { + + fprintf(stderr, "Could not open randomness source file:\n\t"); + fprintf(stderr, "%s", randomness_source.c_str()); + abort(); + + } + + } + +}; + diff --git a/vendor/riscv-isa-sim/riscv/execute.cc b/vendor/riscv-isa-sim/riscv/execute.cc new file mode 100644 index 00000000..98e3cdb0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/execute.cc @@ -0,0 +1,358 @@ +// See LICENSE for license details. + +#include "processor.h" +#include "mmu.h" +#include "disasm.h" +#include + +#ifdef RISCV_ENABLE_COMMITLOG +static void commit_log_reset(processor_t* p) +{ + p->get_state()->log_reg_write.clear(); + p->get_state()->log_mem_read.clear(); + p->get_state()->log_mem_write.clear(); +} + +static void commit_log_stash_privilege(processor_t* p) +{ + state_t* state = p->get_state(); + state->last_inst_priv = state->prv; + state->last_inst_xlen = p->get_xlen(); + state->last_inst_flen = p->get_flen(); +} + +static void commit_log_print_value(FILE *log_file, int width, const void *data) +{ + assert(log_file); + + switch (width) { + case 8: + fprintf(log_file, "0x%01" PRIx8, *(const uint8_t *)data); + break; + case 16: + fprintf(log_file, "0x%04" PRIx16, *(const uint16_t *)data); + break; + case 32: + fprintf(log_file, "0x%08" PRIx32, *(const uint32_t *)data); + break; + case 64: + fprintf(log_file, "0x%016" PRIx64, *(const uint64_t *)data); + break; + default: + // max lengh of vector + if (((width - 1) & width) == 0) { + const uint64_t *arr = (const uint64_t *)data; + + fprintf(log_file, "0x"); + for (int idx = width / 64 - 1; idx >= 0; --idx) { + fprintf(log_file, "%016" PRIx64, arr[idx]); + } + } else { + abort(); + } + break; + } +} + +static void commit_log_print_value(FILE *log_file, int width, uint64_t val) +{ + commit_log_print_value(log_file, width, &val); +} + +const char* processor_t::get_symbol(uint64_t addr) +{ + return sim->get_symbol(addr); +} + +static void commit_log_print_insn(processor_t *p, reg_t pc, insn_t insn) +{ + FILE *log_file = p->get_log_file(); + + auto& reg = p->get_state()->log_reg_write; + auto& load = p->get_state()->log_mem_read; + auto& store = p->get_state()->log_mem_write; + int priv = p->get_state()->last_inst_priv; + int xlen = p->get_state()->last_inst_xlen; + int flen = p->get_state()->last_inst_flen; + + // print core id on all lines so it is easy to grep + fprintf(log_file, "core%4" PRId32 ": ", p->get_id()); + + fprintf(log_file, "%1d ", priv); + commit_log_print_value(log_file, xlen, pc); + fprintf(log_file, " ("); + commit_log_print_value(log_file, insn.length() * 8, insn.bits()); + fprintf(log_file, ")"); + bool show_vec = false; + + for (auto item : reg) { + if (item.first == 0) + continue; + + char prefix; + int size; + int rd = item.first >> 4; + bool is_vec = false; + bool is_vreg = false; + switch (item.first & 0xf) { + case 0: + size = xlen; + prefix = 'x'; + break; + case 1: + size = flen; + prefix = 'f'; + break; + case 2: + size = p->VU.VLEN; + prefix = 'v'; + is_vreg = true; + break; + case 3: + is_vec = true; + break; + case 4: + size = xlen; + prefix = 'c'; + break; + default: + assert("can't been here" && 0); + break; + } + + if (!show_vec && (is_vreg || is_vec)) { + fprintf(log_file, " e%ld %s%ld l%ld", + p->VU.vsew, + p->VU.vflmul < 1 ? "mf" : "m", + p->VU.vflmul < 1 ? 
(reg_t)(1 / p->VU.vflmul) : (reg_t)p->VU.vflmul, + p->VU.vl->read()); + show_vec = true; + } + + if (!is_vec) { + if (prefix == 'c') + fprintf(log_file, " c%d_%s ", rd, csr_name(rd)); + else + fprintf(log_file, " %c%2d ", prefix, rd); + if (is_vreg) + commit_log_print_value(log_file, size, &p->VU.elt(rd, 0)); + else + commit_log_print_value(log_file, size, item.second.v); + } + } + + for (auto item : load) { + fprintf(log_file, " mem "); + commit_log_print_value(log_file, xlen, std::get<0>(item)); + } + + for (auto item : store) { + fprintf(log_file, " mem "); + commit_log_print_value(log_file, xlen, std::get<0>(item)); + fprintf(log_file, " "); + commit_log_print_value(log_file, std::get<2>(item) << 3, std::get<1>(item)); + } + fprintf(log_file, "\n"); +} +#else +static void commit_log_reset(processor_t* p) {} +static void commit_log_stash_privilege(processor_t* p) {} +static void commit_log_print_insn(processor_t* p, reg_t pc, insn_t insn) {} +#endif + +inline void processor_t::update_histogram(reg_t pc) +{ +#ifdef RISCV_ENABLE_HISTOGRAM + pc_histogram[pc]++; +#endif +} + +// This is expected to be inlined by the compiler so each use of execute_insn +// includes a duplicated body of the function to get separate fetch.func +// function calls. +static inline reg_t execute_insn(processor_t* p, reg_t pc, insn_fetch_t fetch) +{ + commit_log_reset(p); + commit_log_stash_privilege(p); + reg_t npc; + + try { + npc = fetch.func(p, fetch.insn, pc); + if (npc != PC_SERIALIZE_BEFORE) { + +#ifdef RISCV_ENABLE_COMMITLOG + if (p->get_log_commits_enabled()) { + commit_log_print_insn(p, pc, fetch.insn); + } +#endif + + } +#ifdef RISCV_ENABLE_COMMITLOG + } catch (wait_for_interrupt_t &t) { + if (p->get_log_commits_enabled()) { + commit_log_print_insn(p, pc, fetch.insn); + } + throw; + } catch(mem_trap_t& t) { + //handle segfault in midlle of vector load/store + if (p->get_log_commits_enabled()) { + for (auto item : p->get_state()->log_reg_write) { + if ((item.first & 3) == 3) { + commit_log_print_insn(p, pc, fetch.insn); + break; + } + } + } + throw; +#endif + } catch(...) { + throw; + } + p->update_histogram(pc); + + return npc; +} + +bool processor_t::slow_path() +{ + return debug || state.single_step != state.STEP_NONE || state.debug_mode; +} + +// fetch/decode/execute loop +void processor_t::step(size_t n) +{ + if (!state.debug_mode) { + if (halt_request == HR_REGULAR) { + enter_debug_mode(DCSR_CAUSE_DEBUGINT); + } else if (halt_request == HR_GROUP) { + enter_debug_mode(DCSR_CAUSE_GROUP); + } // !!!The halt bit in DCSR is deprecated. + else if (state.dcsr->halt) { + enter_debug_mode(DCSR_CAUSE_HALT); + } + } + + while (n > 0) { + size_t instret = 0; + reg_t pc = state.pc; + mmu_t* _mmu = mmu; + + #define advance_pc() \ + if (unlikely(invalid_pc(pc))) { \ + switch (pc) { \ + case PC_SERIALIZE_BEFORE: state.serialized = true; break; \ + case PC_SERIALIZE_AFTER: ++instret; break; \ + case PC_SERIALIZE_WFI: n = ++instret; break; \ + default: abort(); \ + } \ + pc = state.pc; \ + break; \ + } else { \ + state.pc = pc; \ + instret++; \ + } + + try + { + take_pending_interrupt(); + + if (unlikely(slow_path())) + { + // Main simulation loop, slow path. + while (instret < n) + { + if (unlikely(!state.serialized && state.single_step == state.STEP_STEPPED)) { + state.single_step = state.STEP_NONE; + if (!state.debug_mode) { + enter_debug_mode(DCSR_CAUSE_STEP); + // enter_debug_mode changed state.pc, so we can't just continue. 
+ break; + } + } + + if (unlikely(state.single_step == state.STEP_STEPPING)) { + state.single_step = state.STEP_STEPPED; + } + + insn_fetch_t fetch = mmu->load_insn(pc); + if (debug && !state.serialized) + disasm(fetch.insn); + pc = execute_insn(this, pc, fetch); + advance_pc(); + } + } + else while (instret < n) + { + // Main simulation loop, fast path. + for (auto ic_entry = _mmu->access_icache(pc); ; ) { + auto fetch = ic_entry->data; + pc = execute_insn(this, pc, fetch); + ic_entry = ic_entry->next; + if (unlikely(ic_entry->tag != pc)) + break; + if (unlikely(instret + 1 == n)) + break; + instret++; + state.pc = pc; + } + + advance_pc(); + } + } + catch(trap_t& t) + { + take_trap(t, pc); + n = instret; + + if (unlikely(state.single_step == state.STEP_STEPPED)) { + state.single_step = state.STEP_NONE; + enter_debug_mode(DCSR_CAUSE_STEP); + } + } + catch (triggers::matched_t& t) + { + if (mmu->matched_trigger) { + // This exception came from the MMU. That means the instruction hasn't + // fully executed yet. We start it again, but this time it won't throw + // an exception because matched_trigger is already set. (All memory + // instructions are idempotent so restarting is safe.) + + insn_fetch_t fetch = mmu->load_insn(pc); + pc = execute_insn(this, pc, fetch); + advance_pc(); + + delete mmu->matched_trigger; + mmu->matched_trigger = NULL; + } + switch (t.action) { + case triggers::ACTION_DEBUG_MODE: + enter_debug_mode(DCSR_CAUSE_HWBP); + break; + case triggers::ACTION_DEBUG_EXCEPTION: { + trap_breakpoint trap(state.v, t.address); + take_trap(trap, pc); + break; + } + default: + abort(); + } + } + catch (wait_for_interrupt_t &t) + { + // Return to the outer simulation loop, which gives other devices/harts a + // chance to generate interrupts. + // + // In the debug ROM this prevents us from wasting time looping, but also + // allows us to switch to other threads only once per idle loop in case + // there is activity. + n = ++instret; + } + + state.minstret->bump(instret); + + // Model a hart whose CPI is 1. + state.mcycle->bump(instret); + + n -= instret; + } +} diff --git a/vendor/riscv-isa-sim/riscv/extension.cc b/vendor/riscv-isa-sim/riscv/extension.cc new file mode 100644 index 00000000..520c2ed5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/extension.cc @@ -0,0 +1,23 @@ +// See LICENSE for license details. + +#include "extension.h" +#include "trap.h" + +extension_t::~extension_t() +{ +} + +void extension_t::illegal_instruction() +{ + throw trap_illegal_instruction(0); +} + +void extension_t::raise_interrupt() +{ + p->take_interrupt((reg_t)1 << IRQ_COP); // must not return + throw std::logic_error("a COP exception was posted, but interrupts are disabled!"); +} + +void extension_t::clear_interrupt() +{ +} diff --git a/vendor/riscv-isa-sim/riscv/extension.h b/vendor/riscv-isa-sim/riscv/extension.h new file mode 100644 index 00000000..d1e847d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/extension.h @@ -0,0 +1,38 @@ +// See LICENSE for license details. 
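+//
+// extension_t is the abstract interface for plug-in ISA extensions: an
+// implementation supplies its instruction handlers and disassembly entries,
+// and registers itself under a name via REGISTER_EXTENSION so the simulator
+// can look it up by name (see extensions.cc for the dlopen-based lookup of
+// externally built extension libraries).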
+ +#ifndef _RISCV_COPROCESSOR_H +#define _RISCV_COPROCESSOR_H + +#include "processor.h" +#include "disasm.h" +#include +#include + +class extension_t +{ + public: + virtual std::vector get_instructions() = 0; + virtual std::vector get_disasms() = 0; + virtual const char* name() = 0; + virtual void reset() {}; + virtual void set_debug(bool value) {}; + virtual ~extension_t(); + + void set_processor(processor_t* _p) { p = _p; } + protected: + processor_t* p; + + void illegal_instruction(); + void raise_interrupt(); + void clear_interrupt(); +}; + +std::function find_extension(const char* name); +void register_extension(const char* name, std::function f); + +#define REGISTER_EXTENSION(name, constructor) \ + class register_##name { \ + public: register_##name() { register_extension(#name, constructor); } \ + }; static register_##name dummy_##name; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/extensions.cc b/vendor/riscv-isa-sim/riscv/extensions.cc new file mode 100644 index 00000000..347dc5e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/extensions.cc @@ -0,0 +1,46 @@ +// See LICENSE for license details. + +#include "extension.h" +#include +#include +#include + +static std::map>& extensions() +{ + static std::map> v; + return v; +} + +void register_extension(const char* name, std::function f) +{ + extensions()[name] = f; +} + +std::function find_extension(const char* name) +{ + if (!extensions().count(name)) { + // try to find extension xyz by loading libxyz.so + std::string libname = std::string("lib") + name + ".so"; + std::string libdefault = "libcustomext.so"; + bool is_default = false; + auto dlh = dlopen(libname.c_str(), RTLD_LAZY); + if (!dlh) { + dlh = dlopen(libdefault.c_str(), RTLD_LAZY); + if (!dlh) { + fprintf(stderr, "couldn't find shared library either '%s' or '%s')\n", + libname.c_str(), libdefault.c_str()); + exit(-1); + } + + is_default = true; + } + + if (!extensions().count(name)) { + fprintf(stderr, "couldn't find extension '%s' in shared library '%s'\n", + name, is_default ? libdefault.c_str() : libname.c_str()); + exit(-1); + } + } + + return extensions()[name]; +} diff --git a/vendor/riscv-isa-sim/riscv/insn_macros.h b/vendor/riscv-isa-sim/riscv/insn_macros.h new file mode 100644 index 00000000..2fdfcedc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insn_macros.h @@ -0,0 +1,9 @@ +#ifndef _RISCV_INSN_MACROS_H +#define _RISCV_INSN_MACROS_H + +// These conflict with Boost headers so can't be included from insn_template.h +#define P (*p) + +#define require(x) do { if (unlikely(!(x))) throw trap_illegal_instruction(insn.bits()); } while (0) + +#endif diff --git a/vendor/riscv-isa-sim/riscv/insn_template.cc b/vendor/riscv-isa-sim/riscv/insn_template.cc new file mode 100644 index 00000000..e6a2f52c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insn_template.cc @@ -0,0 +1,47 @@ +// See LICENSE for license details. 
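+//
+// Instruction handler template: NAME and OPCODE are placeholders substituted
+// once per instruction when the build instantiates this file, producing
+// separate rv32/rv64 (and rv32e/rv64e) handlers that pull the instruction
+// semantics in from insns/NAME.h.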
+ +#include "insn_template.h" +#include "insn_macros.h" + +reg_t rv32i_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 32 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} + +reg_t rv64i_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 64 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} + +#undef CHECK_REG +#define CHECK_REG(reg) require((reg) < 16) + +reg_t rv32e_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 32 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} + +reg_t rv64e_NAME(processor_t* p, insn_t insn, reg_t pc) +{ + #define xlen 64 + reg_t npc = sext_xlen(pc + insn_length(OPCODE)); + #include "insns/NAME.h" + trace_opcode(p, OPCODE, insn); + #undef xlen + return npc; +} diff --git a/vendor/riscv-isa-sim/riscv/insn_template.h b/vendor/riscv-isa-sim/riscv/insn_template.h new file mode 100644 index 00000000..3c36d10e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insn_template.h @@ -0,0 +1,9 @@ +// See LICENSE for license details. + +#include "arith.h" +#include "mmu.h" +#include "softfloat.h" +#include "internals.h" +#include "specialize.h" +#include "tracer.h" +#include <assert.h> diff --git a/vendor/riscv-isa-sim/riscv/insns/add.h b/vendor/riscv-isa-sim/riscv/insns/add.h new file mode 100644 index 00000000..895e2b18 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/add16.h b/vendor/riscv-isa-sim/riscv/insns/add16.h new file mode 100644 index 00000000..fae43165 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add32.h b/vendor/riscv-isa-sim/riscv/insns/add32.h new file mode 100644 index 00000000..ca544cef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add32.h @@ -0,0 +1,4 @@ +require_rv64; +P_LOOP(32, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add64.h b/vendor/riscv-isa-sim/riscv/insns/add64.h new file mode 100644 index 00000000..0968656f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add64.h @@ -0,0 +1,3 @@ +P_64_PROFILE({ + rd = rs1 + rs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add8.h b/vendor/riscv-isa-sim/riscv/insns/add8.h new file mode 100644 index 00000000..bb54a7b6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/add_uw.h b/vendor/riscv-isa-sim/riscv/insns/add_uw.h new file mode 100644 index 00000000..5b25a367 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/add_uw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen(zext32(RS1) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/addi.h b/vendor/riscv-isa-sim/riscv/insns/addi.h new file mode 100644 index 00000000..1bb5dced --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/addi.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/addiw.h b/vendor/riscv-isa-sim/riscv/insns/addiw.h new file mode 100644 index 00000000..4263eada --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/addiw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(insn.i_imm() + RS1)); diff --git
a/vendor/riscv-isa-sim/riscv/insns/addw.h b/vendor/riscv-isa-sim/riscv/insns/addw.h new file mode 100644 index 00000000..706dc9c8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/addw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(RS1 + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32dsi.h b/vendor/riscv-isa-sim/riscv/insns/aes32dsi.h new file mode 100644 index 00000000..b2680b01 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32dsi.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKND); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_DEC_SBOX[t0]; +uint32_t u = x; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h b/vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h new file mode 100644 index 00000000..d76abc08 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32dsmi.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKND); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_DEC_SBOX[t0]; +uint32_t u ; + +u = (AES_GFMUL(x,0xb) << 24) | + (AES_GFMUL(x,0xd) << 16) | + (AES_GFMUL(x,0x9) << 8) | + (AES_GFMUL(x,0xe) << 0) ; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32esi.h b/vendor/riscv-isa-sim/riscv/insns/aes32esi.h new file mode 100644 index 00000000..d0c0a63b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32esi.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKNE); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_ENC_SBOX[t0]; +uint32_t u = x; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes32esmi.h b/vendor/riscv-isa-sim/riscv/insns/aes32esmi.h new file mode 100644 index 00000000..069718d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes32esmi.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv32; +require_extension(EXT_ZKNE); + +uint8_t bs = insn.bs(); + +uint8_t t0 = RS2 >> (8*bs); +uint8_t x = AES_ENC_SBOX[t0]; +uint32_t u ; + +u = (AES_GFMUL(x,3) << 24) | + ( x << 16) | + ( x << 8) | + (AES_GFMUL(x,2) << 0) ; + +u = (u << (8*bs)) | (u >> (32-8*bs)); + +WRITE_RD(sext_xlen(u ^ RS1)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64ds.h b/vendor/riscv-isa-sim/riscv/insns/aes64ds.h new file mode 100644 index 00000000..64baf87a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64ds.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKND); + +uint64_t temp = AES_INVSHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_DEC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_DEC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_DEC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_DEC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_DEC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_DEC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_DEC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_DEC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +WRITE_RD(temp); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64dsm.h b/vendor/riscv-isa-sim/riscv/insns/aes64dsm.h new file mode 100644 index 00000000..eccf02fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64dsm.h @@ -0,0 +1,29 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKND); + +uint64_t temp = 
AES_INVSHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_DEC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_DEC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_DEC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_DEC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_DEC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_DEC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_DEC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_DEC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +uint32_t col_0 = temp & 0xFFFFFFFF; +uint32_t col_1 = temp >> 32 ; + + col_0 = AES_INVMIXCOLUMN(col_0); + col_1 = AES_INVMIXCOLUMN(col_1); + +uint64_t result= ((uint64_t)col_1 << 32) | col_0; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64es.h b/vendor/riscv-isa-sim/riscv/insns/aes64es.h new file mode 100644 index 00000000..6bbc4efe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64es.h @@ -0,0 +1,21 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKNE); + +uint64_t temp = AES_SHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_ENC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_ENC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_ENC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_ENC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +WRITE_RD(temp); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64esm.h b/vendor/riscv-isa-sim/riscv/insns/aes64esm.h new file mode 100644 index 00000000..0351c11b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64esm.h @@ -0,0 +1,29 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKNE); + +uint64_t temp = AES_SHIFROWS_LO(RS1,RS2); + + temp = ( + ((uint64_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0) | + ((uint64_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint64_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint64_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint64_t)AES_ENC_SBOX[(temp >> 32) & 0xFF] << 32) | + ((uint64_t)AES_ENC_SBOX[(temp >> 40) & 0xFF] << 40) | + ((uint64_t)AES_ENC_SBOX[(temp >> 48) & 0xFF] << 48) | + ((uint64_t)AES_ENC_SBOX[(temp >> 56) & 0xFF] << 56) +); + +uint32_t col_0 = temp & 0xFFFFFFFF; +uint32_t col_1 = temp >> 32 ; + + col_0 = AES_MIXCOLUMN(col_0); + col_1 = AES_MIXCOLUMN(col_1); + +uint64_t result= ((uint64_t)col_1 << 32) | col_0; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64im.h b/vendor/riscv-isa-sim/riscv/insns/aes64im.h new file mode 100644 index 00000000..9dd9b021 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64im.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv64; +require_extension(EXT_ZKND); + +uint32_t col_0 = RS1 & 0xFFFFFFFF; +uint32_t col_1 = RS1 >> 32 ; + + col_0 = AES_INVMIXCOLUMN(col_0); + col_1 = AES_INVMIXCOLUMN(col_1); + +uint64_t result= ((uint64_t)col_1 << 32) | col_0; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h b/vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h new file mode 100644 index 00000000..fff7109c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64ks1i.h @@ -0,0 +1,38 @@ + +#include "aes_common.h" + +require_rv64; +require_either_extension(EXT_ZKND, EXT_ZKNE); + +uint8_t round_consts [10] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 +}; + +uint8_t enc_rcon = insn.rcon() ; + +if(enc_rcon > 0xA) { + 
// Invalid opcode. + throw trap_illegal_instruction(0); +} + +uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF ; +uint8_t rcon = 0 ; +uint64_t result ; + +if(enc_rcon != 0xA) { + temp = (temp >> 8) | (temp << 24); // Rotate right by 8 + rcon = round_consts[enc_rcon]; +} + +temp = + ((uint32_t)AES_ENC_SBOX[(temp >> 24) & 0xFF] << 24) | + ((uint32_t)AES_ENC_SBOX[(temp >> 16) & 0xFF] << 16) | + ((uint32_t)AES_ENC_SBOX[(temp >> 8) & 0xFF] << 8) | + ((uint32_t)AES_ENC_SBOX[(temp >> 0) & 0xFF] << 0) ; + +temp ^= rcon; + +result = ((uint64_t)temp << 32) | temp; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes64ks2.h b/vendor/riscv-isa-sim/riscv/insns/aes64ks2.h new file mode 100644 index 00000000..65d5a77c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes64ks2.h @@ -0,0 +1,16 @@ + +#include "aes_common.h" + +require_rv64; +require_either_extension(EXT_ZKND, EXT_ZKNE); + +uint32_t rs1_hi = RS1 >> 32; +uint32_t rs2_lo = RS2 ; +uint32_t rs2_hi = RS2 >> 32; + +uint32_t r_lo = (rs1_hi ^ rs2_lo ) ; +uint32_t r_hi = (rs1_hi ^ rs2_lo ^ rs2_hi) ; +uint64_t result = ((uint64_t)r_hi << 32) | r_lo ; + +WRITE_RD(result); + diff --git a/vendor/riscv-isa-sim/riscv/insns/aes_common.h b/vendor/riscv-isa-sim/riscv/insns/aes_common.h new file mode 100644 index 00000000..9cc353c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/aes_common.h @@ -0,0 +1,156 @@ + +uint8_t AES_ENC_SBOX[]= { + 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, + 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, + 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, + 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, + 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, + 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, + 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, + 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, + 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, + 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, + 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, + 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, + 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, + 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, + 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, + 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, + 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, + 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, + 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, + 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, + 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, + 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, + 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, + 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, + 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, + 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, + 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, + 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, + 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, + 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, + 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, + 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 +}; + +uint8_t AES_DEC_SBOX[] = { + 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, + 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, + 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, + 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, + 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, + 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, + 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, + 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, + 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, + 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, + 0x6C, 
0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, + 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, + 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, + 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, + 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, + 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, + 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, + 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, + 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, + 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, + 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, + 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, + 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, + 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, + 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, + 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, + 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, + 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, + 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, + 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, + 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, + 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D +}; + +#define AES_UNPACK_BYTES(b0,b1,b2,b3) \ + uint8_t b0 = (RS1 >> 0) & 0xFF; \ + uint8_t b1 = (RS2 >> 8) & 0xFF; \ + uint8_t b2 = (RS1 >> 16) & 0xFF; \ + uint8_t b3 = (RS2 >> 24) & 0xFF; \ + +#define AES_PACK_BYTES(b0,b1,b2,b3) ( \ + (uint32_t)b0 << 0 | \ + (uint32_t)b1 << 8 | \ + (uint32_t)b2 << 16 | \ + (uint32_t)b3 << 24 ) + +#define AES_SBOX(b0, b1, b2, b3) \ + b0 = AES_ENC_SBOX[b0]; \ + b1 = AES_ENC_SBOX[b1]; \ + b2 = AES_ENC_SBOX[b2]; \ + b3 = AES_ENC_SBOX[b3]; \ + +#define AES_RSBOX(b0, b1, b2, b3) \ + b0 = AES_DEC_SBOX[b0]; \ + b1 = AES_DEC_SBOX[b1]; \ + b2 = AES_DEC_SBOX[b2]; \ + b3 = AES_DEC_SBOX[b3]; \ + +#define AES_XTIME(a) \ + ((a << 1) ^ ((a&0x80) ? 0x1b : 0)) + +#define AES_GFMUL(a,b) (( \ + ( ( (b) & 0x1 ) ? (a) : 0 ) ^ \ + ( ( (b) & 0x2 ) ? AES_XTIME(a) : 0 ) ^ \ + ( ( (b) & 0x4 ) ? AES_XTIME(AES_XTIME(a)) : 0 ) ^ \ + ( ( (b) & 0x8 ) ? 
AES_XTIME(AES_XTIME(AES_XTIME(a))) : 0 ) )&0xFF) + +#define BY(X,I) ((X >> (8*I)) & 0xFF) + +#define AES_SHIFROWS_LO(RS1,RS2) ( \ + (((RS1 >> 24) & 0xFF) << 56) | \ + (((RS2 >> 48) & 0xFF) << 48) | \ + (((RS2 >> 8) & 0xFF) << 40) | \ + (((RS1 >> 32) & 0xFF) << 32) | \ + (((RS2 >> 56) & 0xFF) << 24) | \ + (((RS2 >> 16) & 0xFF) << 16) | \ + (((RS1 >> 40) & 0xFF) << 8) | \ + (((RS1 >> 0) & 0xFF) << 0) ) + +#define AES_INVSHIFROWS_LO(RS1,RS2) ( \ + (((RS2 >> 24) & 0xFF) << 56) | \ + (((RS2 >> 48) & 0xFF) << 48) | \ + (((RS1 >> 8) & 0xFF) << 40) | \ + (((RS1 >> 32) & 0xFF) << 32) | \ + (((RS1 >> 56) & 0xFF) << 24) | \ + (((RS2 >> 16) & 0xFF) << 16) | \ + (((RS2 >> 40) & 0xFF) << 8) | \ + (((RS1 >> 0) & 0xFF) << 0) ) + + +#define AES_MIXBYTE(COL,B0,B1,B2,B3) ( \ + BY(COL,B3) ^ \ + BY(COL,B2) ^ \ + AES_GFMUL(BY(COL,B1), 3) ^ \ + AES_GFMUL(BY(COL,B0), 2) \ +) + +#define AES_MIXCOLUMN(COL) ( \ + AES_MIXBYTE(COL,3,0,1,2) << 24 | \ + AES_MIXBYTE(COL,2,3,0,1) << 16 | \ + AES_MIXBYTE(COL,1,2,3,0) << 8 | \ + AES_MIXBYTE(COL,0,1,2,3) << 0 \ +) + + +#define AES_INVMIXBYTE(COL,B0,B1,B2,B3) ( \ + AES_GFMUL(BY(COL,B3),0x9) ^ \ + AES_GFMUL(BY(COL,B2),0xd) ^ \ + AES_GFMUL(BY(COL,B1),0xb) ^ \ + AES_GFMUL(BY(COL,B0),0xe) \ +) + +#define AES_INVMIXCOLUMN(COL) ( \ + AES_INVMIXBYTE(COL,3,0,1,2) << 24 | \ + AES_INVMIXBYTE(COL,2,3,0,1) << 16 | \ + AES_INVMIXBYTE(COL,1,2,3,0) << 8 | \ + AES_INVMIXBYTE(COL,0,1,2,3) << 0 \ +) + diff --git a/vendor/riscv-isa-sim/riscv/insns/amoadd_d.h b/vendor/riscv-isa-sim/riscv/insns/amoadd_d.h new file mode 100644 index 00000000..6090fbc5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoadd_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs + RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoadd_w.h b/vendor/riscv-isa-sim/riscv/insns/amoadd_w.h new file mode 100644 index 00000000..2c6471af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoadd_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs + RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoand_d.h b/vendor/riscv-isa-sim/riscv/insns/amoand_d.h new file mode 100644 index 00000000..80aea184 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoand_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs & RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoand_w.h b/vendor/riscv-isa-sim/riscv/insns/amoand_w.h new file mode 100644 index 00000000..f7e1ba7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoand_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs & RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomax_d.h b/vendor/riscv-isa-sim/riscv/insns/amomax_d.h new file mode 100644 index 00000000..496d8ada --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomax_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::max(lhs, int64_t(RS2)); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomax_w.h b/vendor/riscv-isa-sim/riscv/insns/amomax_w.h new file mode 100644 index 00000000..757bdd2c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomax_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::max(lhs, int32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h 
b/vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h new file mode 100644 index 00000000..12b17331 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomaxu_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::max(lhs, RS2); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h b/vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h new file mode 100644 index 00000000..538df1c4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomaxu_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::max(lhs, uint32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomin_d.h b/vendor/riscv-isa-sim/riscv/insns/amomin_d.h new file mode 100644 index 00000000..725d9839 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomin_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](int64_t lhs) { return std::min(lhs, int64_t(RS2)); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amomin_w.h b/vendor/riscv-isa-sim/riscv/insns/amomin_w.h new file mode 100644 index 00000000..ee53faa0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amomin_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](int32_t lhs) { return std::min(lhs, int32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amominu_d.h b/vendor/riscv-isa-sim/riscv/insns/amominu_d.h new file mode 100644 index 00000000..15b6c0a4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amominu_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return std::min(lhs, RS2); })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amominu_w.h b/vendor/riscv-isa-sim/riscv/insns/amominu_w.h new file mode 100644 index 00000000..52e1141b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amominu_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return std::min(lhs, uint32_t(RS2)); }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoor_d.h b/vendor/riscv-isa-sim/riscv/insns/amoor_d.h new file mode 100644 index 00000000..de876274 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoor_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs | RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoor_w.h b/vendor/riscv-isa-sim/riscv/insns/amoor_w.h new file mode 100644 index 00000000..3455981d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoor_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs | RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoswap_d.h b/vendor/riscv-isa-sim/riscv/insns/amoswap_d.h new file mode 100644 index 00000000..e1bffdeb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoswap_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoswap_w.h b/vendor/riscv-isa-sim/riscv/insns/amoswap_w.h new file mode 100644 index 00000000..0f78369c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoswap_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoxor_d.h b/vendor/riscv-isa-sim/riscv/insns/amoxor_d.h new file mode 100644 index 
00000000..1b3c0bf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoxor_d.h @@ -0,0 +1,3 @@ +require_extension('A'); +require_rv64; +WRITE_RD(MMU.amo_uint64(RS1, [&](uint64_t lhs) { return lhs ^ RS2; })); diff --git a/vendor/riscv-isa-sim/riscv/insns/amoxor_w.h b/vendor/riscv-isa-sim/riscv/insns/amoxor_w.h new file mode 100644 index 00000000..a1ea82f1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/amoxor_w.h @@ -0,0 +1,2 @@ +require_extension('A'); +WRITE_RD(sext32(MMU.amo_uint32(RS1, [&](uint32_t lhs) { return lhs ^ RS2; }))); diff --git a/vendor/riscv-isa-sim/riscv/insns/and.h b/vendor/riscv-isa-sim/riscv/insns/and.h new file mode 100644 index 00000000..86b48831 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/and.h @@ -0,0 +1 @@ +WRITE_RD(RS1 & RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/andi.h b/vendor/riscv-isa-sim/riscv/insns/andi.h new file mode 100644 index 00000000..bcc51e44 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/andi.h @@ -0,0 +1 @@ +WRITE_RD(insn.i_imm() & RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/andn.h b/vendor/riscv-isa-sim/riscv/insns/andn.h new file mode 100644 index 00000000..8add1919 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/andn.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +WRITE_RD(RS1 & ~RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/auipc.h b/vendor/riscv-isa-sim/riscv/insns/auipc.h new file mode 100644 index 00000000..1a2b169b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/auipc.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(insn.u_imm() + pc)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ave.h b/vendor/riscv-isa-sim/riscv/insns/ave.h new file mode 100644 index 00000000..59979002 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ave.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZPN); +sreg_t rs1 = RS1; +sreg_t rs2 = RS2; +sreg_t carry = (rs1 & 1) | (rs2 & 1); +WRITE_RD(sext_xlen((rs1 >> 1) + (rs2 >> 1) + carry)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bclr.h b/vendor/riscv-isa-sim/riscv/insns/bclr.h new file mode 100644 index 00000000..589273e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bclr.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(RS1 & ~(1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bclri.h b/vendor/riscv-isa-sim/riscv/insns/bclri.h new file mode 100644 index 00000000..8df6a5f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bclri.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(RS1 & ~(1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bcompress.h b/vendor/riscv-isa-sim/riscv/insns/bcompress.h new file mode 100644 index 00000000..579346f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bcompress.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext_xlen(RS1), mask = zext_xlen(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data & b) >> (ctz(b) - i); + i += popcount(b); + mask -= b; +} +WRITE_RD(sext_xlen(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bcompressw.h b/vendor/riscv-isa-sim/riscv/insns/bcompressw.h new file mode 100644 index 00000000..2c1017cd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bcompressw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext32(RS1), mask = zext32(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data & b) >> (ctz(b) - i); + i += popcount(b); + mask 
-= b; +} +WRITE_RD(sext32(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bdecompress.h b/vendor/riscv-isa-sim/riscv/insns/bdecompress.h new file mode 100644 index 00000000..2894be01 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bdecompress.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext_xlen(RS1), mask = zext_xlen(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data << (ctz(b) - i)) & b; + i += popcount(b); + mask -= b; +} +WRITE_RD(sext_xlen(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bdecompressw.h b/vendor/riscv-isa-sim/riscv/insns/bdecompressw.h new file mode 100644 index 00000000..468a7260 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bdecompressw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBE); +uint64_t c = 0, i = 0, data = zext32(RS1), mask = zext32(RS2); +while (mask) { + uint64_t b = mask & ~((mask | (mask-1)) + 1); + c |= (data << (ctz(b) - i)) & b; + i += popcount(b); + mask -= b; +} +WRITE_RD(sext32(c)); diff --git a/vendor/riscv-isa-sim/riscv/insns/beq.h b/vendor/riscv-isa-sim/riscv/insns/beq.h new file mode 100644 index 00000000..fd7e0614 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/beq.h @@ -0,0 +1,2 @@ +if(RS1 == RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bext.h b/vendor/riscv-isa-sim/riscv/insns/bext.h new file mode 100644 index 00000000..24c80b07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bext.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(1 & (RS1 >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bexti.h b/vendor/riscv-isa-sim/riscv/insns/bexti.h new file mode 100644 index 00000000..31d23166 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bexti.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(1 & (RS1 >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bfp.h b/vendor/riscv-isa-sim/riscv/insns/bfp.h new file mode 100644 index 00000000..886d8405 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bfp.h @@ -0,0 +1,10 @@ +require_extension(EXT_XZBF); +reg_t cfg = RS2 >> (xlen/2); +if ((cfg >> 30) == 2) + cfg = cfg >> 16; +int len = (cfg >> 8) & (xlen/2-1); +int off = cfg & (xlen-1); +len = len ? len : xlen/2; +reg_t mask = ~(~reg_t(0) << len) << off; +reg_t data = RS2 << off; +WRITE_RD(sext_xlen((data & mask) | (RS1 & ~mask))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bfpw.h b/vendor/riscv-isa-sim/riscv/insns/bfpw.h new file mode 100644 index 00000000..42479e72 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bfpw.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_XZBF); +reg_t cfg = RS2 >> 16; +int len = (cfg >> 8) & 15; +int off = cfg & 31; +len = len ? 
len : 16; +reg_t mask = ~(~reg_t(0) << len) << off; +reg_t data = RS2 << off; +WRITE_RD(sext32((data & mask) | (RS1 & ~mask))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bge.h b/vendor/riscv-isa-sim/riscv/insns/bge.h new file mode 100644 index 00000000..da0c68e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bge.h @@ -0,0 +1,2 @@ +if(sreg_t(RS1) >= sreg_t(RS2)) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bgeu.h b/vendor/riscv-isa-sim/riscv/insns/bgeu.h new file mode 100644 index 00000000..d764a347 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bgeu.h @@ -0,0 +1,2 @@ +if(RS1 >= RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/binv.h b/vendor/riscv-isa-sim/riscv/insns/binv.h new file mode 100644 index 00000000..cef5b780 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/binv.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(RS1 ^ (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/binvi.h b/vendor/riscv-isa-sim/riscv/insns/binvi.h new file mode 100644 index 00000000..3272d393 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/binvi.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(RS1 ^ (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/blt.h b/vendor/riscv-isa-sim/riscv/insns/blt.h new file mode 100644 index 00000000..c54fb769 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/blt.h @@ -0,0 +1,2 @@ +if(sreg_t(RS1) < sreg_t(RS2)) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bltu.h b/vendor/riscv-isa-sim/riscv/insns/bltu.h new file mode 100644 index 00000000..ff75e8a6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bltu.h @@ -0,0 +1,2 @@ +if(RS1 < RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bmatflip.h b/vendor/riscv-isa-sim/riscv/insns/bmatflip.h new file mode 100644 index 00000000..c10df8f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bmatflip.h @@ -0,0 +1,11 @@ +require_rv64; +require_extension(EXT_XZBM); +reg_t x = RS1; +for (int i = 0; i < 3; i++) { + x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); + x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); + x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); + x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); + x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +} +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bmator.h b/vendor/riscv-isa-sim/riscv/insns/bmator.h new file mode 100644 index 00000000..33057ca0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bmator.h @@ -0,0 +1,29 @@ +require_rv64; +require_extension(EXT_XZBM); + +// transpose of rs2 +int64_t rs2t = RS2; +for (int i = 0; i < 3; i++) { + rs2t = (rs2t & 0xFFFF00000000FFFFLL) | ((rs2t & 0x0000FFFF00000000LL) >> 16) | ((rs2t & 0x00000000FFFF0000LL) << 16); + rs2t = (rs2t & 0xFF0000FFFF0000FFLL) | ((rs2t & 0x00FF000000FF0000LL) >> 8) | ((rs2t & 0x0000FF000000FF00LL) << 8); + rs2t = (rs2t & 0xF00FF00FF00FF00FLL) | ((rs2t & 0x0F000F000F000F00LL) >> 4) | ((rs2t & 0x00F000F000F000F0LL) << 4); + rs2t = (rs2t & 0xC3C3C3C3C3C3C3C3LL) | ((rs2t & 0x3030303030303030LL) >> 2) | ((rs2t & 0x0C0C0C0C0C0C0C0CLL) << 2); + rs2t = (rs2t & 
0x9999999999999999LL) | ((rs2t & 0x4444444444444444LL) >> 1) | ((rs2t & 0x2222222222222222LL) << 1); +} + +int64_t rs1 = RS1; +uint8_t u[8]; // rows of rs1 +uint8_t v[8]; // cols of rs2 + +for (int i = 0; i < 8; i++) { + u[i] = rs1 >> (i*8); + v[i] = rs2t >> (i*8); +} + +uint64_t x = 0; +for (int i = 0; i < 64; i++) { + if ((u[i / 8] & v[i % 8]) != 0) + x |= 1LL << i; +} + +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bmatxor.h b/vendor/riscv-isa-sim/riscv/insns/bmatxor.h new file mode 100644 index 00000000..ca2d0967 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bmatxor.h @@ -0,0 +1,29 @@ +require_rv64; +require_extension(EXT_XZBM); + +// transpose of rs2 +int64_t rs2t = RS2; +for (int i = 0; i < 3; i++) { + rs2t = (rs2t & 0xFFFF00000000FFFFLL) | ((rs2t & 0x0000FFFF00000000LL) >> 16) | ((rs2t & 0x00000000FFFF0000LL) << 16); + rs2t = (rs2t & 0xFF0000FFFF0000FFLL) | ((rs2t & 0x00FF000000FF0000LL) >> 8) | ((rs2t & 0x0000FF000000FF00LL) << 8); + rs2t = (rs2t & 0xF00FF00FF00FF00FLL) | ((rs2t & 0x0F000F000F000F00LL) >> 4) | ((rs2t & 0x00F000F000F000F0LL) << 4); + rs2t = (rs2t & 0xC3C3C3C3C3C3C3C3LL) | ((rs2t & 0x3030303030303030LL) >> 2) | ((rs2t & 0x0C0C0C0C0C0C0C0CLL) << 2); + rs2t = (rs2t & 0x9999999999999999LL) | ((rs2t & 0x4444444444444444LL) >> 1) | ((rs2t & 0x2222222222222222LL) << 1); +} + +int64_t rs1 = RS1; +uint8_t u[8]; // rows of rs1 +uint8_t v[8]; // cols of rs2 + +for (int i = 0; i < 8; i++) { + u[i] = rs1 >> (i*8); + v[i] = rs2t >> (i*8); +} + +uint64_t x = 0; +for (int i = 0; i < 64; i++) { + if (popcount(u[i / 8] & v[i % 8]) & 1) + x |= 1LL << i; +} + +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/bne.h b/vendor/riscv-isa-sim/riscv/insns/bne.h new file mode 100644 index 00000000..1e6cb7c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bne.h @@ -0,0 +1,2 @@ +if(RS1 != RS2) + set_pc(BRANCH_TARGET); diff --git a/vendor/riscv-isa-sim/riscv/insns/bset.h b/vendor/riscv-isa-sim/riscv/insns/bset.h new file mode 100644 index 00000000..9009fb32 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bset.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = RS2 & (xlen-1); +WRITE_RD(sext_xlen(RS1 | (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/bseti.h b/vendor/riscv-isa-sim/riscv/insns/bseti.h new file mode 100644 index 00000000..49523786 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/bseti.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZBS); +int shamt = SHAMT & (xlen-1); +WRITE_RD(sext_xlen(RS1 | (1LL << shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_add.h b/vendor/riscv-isa-sim/riscv/insns/c_add.h new file mode 100644 index 00000000..ab7d4d4c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_add.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rs2() != 0); +WRITE_RD(sext_xlen(RVC_RS1 + RVC_RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_addi.h b/vendor/riscv-isa-sim/riscv/insns/c_addi.h new file mode 100644 index 00000000..eb983442 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_addi.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RD(sext_xlen(RVC_RS1 + insn.rvc_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h b/vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h new file mode 100644 index 00000000..e5f3832f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_addi4spn.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_addi4spn_imm() != 0); +WRITE_RVC_RS2S(sext_xlen(RVC_SP + insn.rvc_addi4spn_imm())); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/c_addw.h b/vendor/riscv-isa-sim/riscv/insns/c_addw.h new file mode 100644 index 00000000..6e0ae3a5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_addw.h @@ -0,0 +1,3 @@ +require_extension('C'); +require_rv64; +WRITE_RVC_RS1S(sext32(RVC_RS1S + RVC_RS2S)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_and.h b/vendor/riscv-isa-sim/riscv/insns/c_and.h new file mode 100644 index 00000000..4d7bab6c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_and.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S & RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_andi.h b/vendor/riscv-isa-sim/riscv/insns/c_andi.h new file mode 100644 index 00000000..9de5a1ac --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_andi.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S & insn.rvc_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_beqz.h b/vendor/riscv-isa-sim/riscv/insns/c_beqz.h new file mode 100644 index 00000000..35c11960 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_beqz.h @@ -0,0 +1,3 @@ +require_extension('C'); +if (RVC_RS1S == 0) + set_pc(pc + insn.rvc_b_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_bnez.h b/vendor/riscv-isa-sim/riscv/insns/c_bnez.h new file mode 100644 index 00000000..1e40ea78 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_bnez.h @@ -0,0 +1,3 @@ +require_extension('C'); +if (RVC_RS1S != 0) + set_pc(pc + insn.rvc_b_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_ebreak.h b/vendor/riscv-isa-sim/riscv/insns/c_ebreak.h new file mode 100644 index 00000000..7d04f46d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_ebreak.h @@ -0,0 +1,2 @@ +require_extension('C'); +throw trap_breakpoint(STATE.v, pc); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fld.h b/vendor/riscv-isa-sim/riscv/insns/c_fld.h new file mode 100644 index 00000000..319615b8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fld.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +WRITE_RVC_FRS2S(f64(MMU.load_uint64(RVC_RS1S + insn.rvc_ld_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fldsp.h b/vendor/riscv-isa-sim/riscv/insns/c_fldsp.h new file mode 100644 index 00000000..534eef7d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fldsp.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +WRITE_FRD(f64(MMU.load_uint64(RVC_SP + insn.rvc_ldsp_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_flw.h b/vendor/riscv-isa-sim/riscv/insns/c_flw.h new file mode 100644 index 00000000..682566c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_flw.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + WRITE_RVC_FRS2S(f32(MMU.load_uint32(RVC_RS1S + insn.rvc_lw_imm()))); +} else { // c.ld + WRITE_RVC_RS2S(MMU.load_int64(RVC_RS1S + insn.rvc_ld_imm())); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_flwsp.h b/vendor/riscv-isa-sim/riscv/insns/c_flwsp.h new file mode 100644 index 00000000..79058c40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_flwsp.h @@ -0,0 +1,9 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + WRITE_FRD(f32(MMU.load_uint32(RVC_SP + insn.rvc_lwsp_imm()))); +} else { // c.ldsp + require(insn.rvc_rd() != 0); + WRITE_RD(MMU.load_int64(RVC_SP + insn.rvc_ldsp_imm())); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fsd.h b/vendor/riscv-isa-sim/riscv/insns/c_fsd.h new file mode 100644 index 
00000000..6f2c8f4c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fsd.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +MMU.store_uint64(RVC_RS1S + insn.rvc_ld_imm(), RVC_FRS2S.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h b/vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h new file mode 100644 index 00000000..27b93319 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fsdsp.h @@ -0,0 +1,4 @@ +require_extension('C'); +require_extension('D'); +require_fp; +MMU.store_uint64(RVC_SP + insn.rvc_sdsp_imm(), RVC_FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fsw.h b/vendor/riscv-isa-sim/riscv/insns/c_fsw.h new file mode 100644 index 00000000..70858229 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fsw.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + MMU.store_uint32(RVC_RS1S + insn.rvc_lw_imm(), RVC_FRS2S.v[0]); +} else { // c.sd + MMU.store_uint64(RVC_RS1S + insn.rvc_ld_imm(), RVC_RS2S); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_fswsp.h b/vendor/riscv-isa-sim/riscv/insns/c_fswsp.h new file mode 100644 index 00000000..c5a003fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_fswsp.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (xlen == 32) { + require_extension('F'); + require_fp; + MMU.store_uint32(RVC_SP + insn.rvc_swsp_imm(), RVC_FRS2.v[0]); +} else { // c.sdsp + MMU.store_uint64(RVC_SP + insn.rvc_sdsp_imm(), RVC_RS2); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_j.h b/vendor/riscv-isa-sim/riscv/insns/c_j.h new file mode 100644 index 00000000..6d8939c4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_j.h @@ -0,0 +1,2 @@ +require_extension('C'); +set_pc(pc + insn.rvc_j_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_jal.h b/vendor/riscv-isa-sim/riscv/insns/c_jal.h new file mode 100644 index 00000000..4f156f61 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_jal.h @@ -0,0 +1,9 @@ +require_extension('C'); +if (xlen == 32) { + reg_t tmp = npc; + set_pc(pc + insn.rvc_j_imm()); + WRITE_REG(X_RA, tmp); +} else { // c.addiw + require(insn.rvc_rd() != 0); + WRITE_RD(sext32(RVC_RS1 + insn.rvc_imm())); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_jalr.h b/vendor/riscv-isa-sim/riscv/insns/c_jalr.h new file mode 100644 index 00000000..cb1e4222 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_jalr.h @@ -0,0 +1,5 @@ +require_extension('C'); +require(insn.rvc_rs1() != 0); +reg_t tmp = npc; +set_pc(RVC_RS1 & ~reg_t(1)); +WRITE_REG(X_RA, tmp); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_jr.h b/vendor/riscv-isa-sim/riscv/insns/c_jr.h new file mode 100644 index 00000000..9c4a8ea9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_jr.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rs1() != 0); +set_pc(RVC_RS1 & ~reg_t(1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_li.h b/vendor/riscv-isa-sim/riscv/insns/c_li.h new file mode 100644 index 00000000..f9fd66b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_li.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RD(insn.rvc_imm()); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_lui.h b/vendor/riscv-isa-sim/riscv/insns/c_lui.h new file mode 100644 index 00000000..75d8eb89 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_lui.h @@ -0,0 +1,8 @@ +require_extension('C'); +if (insn.rvc_rd() == 2) { // c.addi16sp + require(insn.rvc_addi16sp_imm() != 0); + WRITE_REG(X_SP, sext_xlen(RVC_SP + insn.rvc_addi16sp_imm())); +} else { + require(insn.rvc_imm() != 0); + 
WRITE_RD(insn.rvc_imm() << 12); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/c_lw.h b/vendor/riscv-isa-sim/riscv/insns/c_lw.h new file mode 100644 index 00000000..ef49dd90 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_lw.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS2S(MMU.load_int32(RVC_RS1S + insn.rvc_lw_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_lwsp.h b/vendor/riscv-isa-sim/riscv/insns/c_lwsp.h new file mode 100644 index 00000000..b3d74dbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_lwsp.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rd() != 0); +WRITE_RD(MMU.load_int32(RVC_SP + insn.rvc_lwsp_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_mv.h b/vendor/riscv-isa-sim/riscv/insns/c_mv.h new file mode 100644 index 00000000..a03d0d07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_mv.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_rs2() != 0); +WRITE_RD(RVC_RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_or.h b/vendor/riscv-isa-sim/riscv/insns/c_or.h new file mode 100644 index 00000000..56436d1a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_or.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S | RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_slli.h b/vendor/riscv-isa-sim/riscv/insns/c_slli.h new file mode 100644 index 00000000..24fbb133 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_slli.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_zimm() < xlen); +WRITE_RD(sext_xlen(RVC_RS1 << insn.rvc_zimm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_srai.h b/vendor/riscv-isa-sim/riscv/insns/c_srai.h new file mode 100644 index 00000000..f6638b1e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_srai.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_zimm() < xlen); +WRITE_RVC_RS1S(sext_xlen(sext_xlen(RVC_RS1S) >> insn.rvc_zimm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_srli.h b/vendor/riscv-isa-sim/riscv/insns/c_srli.h new file mode 100644 index 00000000..f410fefd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_srli.h @@ -0,0 +1,3 @@ +require_extension('C'); +require(insn.rvc_zimm() < xlen); +WRITE_RVC_RS1S(sext_xlen(zext_xlen(RVC_RS1S) >> insn.rvc_zimm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_sub.h b/vendor/riscv-isa-sim/riscv/insns/c_sub.h new file mode 100644 index 00000000..1b8e3735 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_sub.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(sext_xlen(RVC_RS1S - RVC_RS2S)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_subw.h b/vendor/riscv-isa-sim/riscv/insns/c_subw.h new file mode 100644 index 00000000..580f5b54 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_subw.h @@ -0,0 +1,3 @@ +require_extension('C'); +require_rv64; +WRITE_RVC_RS1S(sext32(RVC_RS1S - RVC_RS2S)); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_sw.h b/vendor/riscv-isa-sim/riscv/insns/c_sw.h new file mode 100644 index 00000000..3073e9d6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_sw.h @@ -0,0 +1,2 @@ +require_extension('C'); +MMU.store_uint32(RVC_RS1S + insn.rvc_lw_imm(), RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_swsp.h b/vendor/riscv-isa-sim/riscv/insns/c_swsp.h new file mode 100644 index 00000000..b8995ab0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_swsp.h @@ -0,0 +1,2 @@ +require_extension('C'); +MMU.store_uint32(RVC_SP + insn.rvc_swsp_imm(), RVC_RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/c_xor.h 
b/vendor/riscv-isa-sim/riscv/insns/c_xor.h new file mode 100644 index 00000000..9981c1af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/c_xor.h @@ -0,0 +1,2 @@ +require_extension('C'); +WRITE_RVC_RS1S(RVC_RS1S ^ RVC_RS2S); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_clean.h b/vendor/riscv-isa-sim/riscv/insns/cbo_clean.h new file mode 100644 index 00000000..201fa447 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_clean.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZICBOM); +DECLARE_XENVCFG_VARS(CBCFE); +require_envcfg(CBCFE); +MMU.clean_inval(RS1, true, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_flush.h b/vendor/riscv-isa-sim/riscv/insns/cbo_flush.h new file mode 100644 index 00000000..b17f5cf1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_flush.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZICBOM); +DECLARE_XENVCFG_VARS(CBCFE); +require_envcfg(CBCFE); +MMU.clean_inval(RS1, true, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_inval.h b/vendor/riscv-isa-sim/riscv/insns/cbo_inval.h new file mode 100644 index 00000000..bd80a6fd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_inval.h @@ -0,0 +1,9 @@ +require_extension(EXT_ZICBOM); +DECLARE_XENVCFG_VARS(CBIE); +require_envcfg(CBIE); +if (((STATE.prv != PRV_M) && (mCBIE == 1)) || + ((!STATE.v && (STATE.prv == PRV_U)) && (sCBIE == 1)) || + (STATE.v && ((hCBIE == 1) || ((STATE.prv == PRV_U) && (sCBIE == 0))))) + MMU.clean_inval(RS1, true, true); +else + MMU.clean_inval(RS1, false, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/cbo_zero.h b/vendor/riscv-isa-sim/riscv/insns/cbo_zero.h new file mode 100644 index 00000000..4bbe28d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cbo_zero.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZICBOZ); +DECLARE_XENVCFG_VARS(CBZE); +require_envcfg(CBZE); +MMU.cbo_zero(RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmul.h b/vendor/riscv-isa-sim/riscv/insns/clmul.h new file mode 100644 index 00000000..b8e6d6d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmul.h @@ -0,0 +1,6 @@ +require_either_extension(EXT_ZBC, EXT_ZBKC); +reg_t a = zext_xlen(RS1), b = zext_xlen(RS2), x = 0; +for (int i = 0; i < xlen; i++) + if ((b >> i) & 1) + x ^= a << i; +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulh.h b/vendor/riscv-isa-sim/riscv/insns/clmulh.h new file mode 100644 index 00000000..dfee94e2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulh.h @@ -0,0 +1,6 @@ +require_either_extension(EXT_ZBC, EXT_ZBKC); +reg_t a = zext_xlen(RS1), b = zext_xlen(RS2), x = 0; +for (int i = 1; i < xlen; i++) + if ((b >> i) & 1) + x ^= a >> (xlen-i); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulhw.h b/vendor/riscv-isa-sim/riscv/insns/clmulhw.h new file mode 100644 index 00000000..f41acb0e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulhw.h @@ -0,0 +1,6 @@ +require_extension(EXT_XZBC); +reg_t a = zext32(RS1), b = zext32(RS2), x = 0; +for (int i = 1; i < 32; i++) + if ((b >> i) & 1) + x ^= a >> (32-i); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulr.h b/vendor/riscv-isa-sim/riscv/insns/clmulr.h new file mode 100644 index 00000000..ffa046d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulr.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZBC); +reg_t a = zext_xlen(RS1), b = zext_xlen(RS2), x = 0; +for (int i = 0; i < xlen; i++) + if ((b >> i) & 1) + x ^= a >> (xlen-i-1); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulrw.h
b/vendor/riscv-isa-sim/riscv/insns/clmulrw.h new file mode 100644 index 00000000..784859ae --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulrw.h @@ -0,0 +1,6 @@ +require_extension(EXT_XZBC); +reg_t a = zext32(RS1), b = zext32(RS2), x = 0; +for (int i = 0; i < 32; i++) + if ((b >> i) & 1) + x ^= a >> (31-i); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clmulw.h b/vendor/riscv-isa-sim/riscv/insns/clmulw.h new file mode 100644 index 00000000..5bb753fe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clmulw.h @@ -0,0 +1,6 @@ +require_extension(EXT_XZBC); +reg_t a = zext32(RS1), b = zext32(RS2), x = 0; +for (int i = 0; i < 32; i++) + if ((b >> i) & 1) + x ^= a << i; +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clo16.h b/vendor/riscv-isa-sim/riscv/insns/clo16.h new file mode 100644 index 00000000..9da65993 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clo16.h @@ -0,0 +1,11 @@ +P_ONE_LOOP(16, { + pd = 0; + ps1 = ~ps1; + if (!ps1) pd = 16; + else { + if ((ps1 & 0xFF00) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x8000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clo32.h b/vendor/riscv-isa-sim/riscv/insns/clo32.h new file mode 100644 index 00000000..431bb0e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clo32.h @@ -0,0 +1,12 @@ +P_ONE_LOOP(32, { + pd = 0; + ps1 = ~ps1; + if (!ps1) pd = 32; + else { + if ((ps1 & 0xFFFF0000) == 0) { pd += 16; ps1 <<= 16; } + if ((ps1 & 0xFF000000) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF0000000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0000000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80000000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clo8.h b/vendor/riscv-isa-sim/riscv/insns/clo8.h new file mode 100644 index 00000000..2581adec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clo8.h @@ -0,0 +1,10 @@ +P_ONE_LOOP(8, { + pd = 0; + ps1 = ~ps1; + if (!ps1) pd = 8; + else { + if ((ps1 & 0xF0) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clrs16.h b/vendor/riscv-isa-sim/riscv/insns/clrs16.h new file mode 100644 index 00000000..65412629 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clrs16.h @@ -0,0 +1,12 @@ +P_ONE_LOOP(16, { + pd = 0; + if (ps1 < 0) ps1 = ~ps1; + if (!ps1) pd = 16; + else { + if ((ps1 & 0xFF00) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x8000) == 0) { pd += 1; } + } + pd -= 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clrs32.h b/vendor/riscv-isa-sim/riscv/insns/clrs32.h new file mode 100644 index 00000000..c75db180 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clrs32.h @@ -0,0 +1,13 @@ +P_ONE_LOOP(32, { + pd = 0; + if (ps1 < 0) ps1 = ~ps1; + if (!ps1) pd = 32; + else { + if ((ps1 & 0xFFFF0000) == 0) { pd += 16; ps1 <<= 16; } + if ((ps1 & 0xFF000000) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF0000000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0000000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80000000) == 0) { pd += 1; } + } + pd -= 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clrs8.h b/vendor/riscv-isa-sim/riscv/insns/clrs8.h new file mode 100644 index 00000000..f6f82987 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clrs8.h @@ -0,0 +1,11 
@@ +P_ONE_LOOP(8, { + pd = 0; + if (ps1 < 0) ps1 = ~ps1; + if (!ps1) pd = 8; + else { + if ((ps1 & 0xF0) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80) == 0) { pd += 1; } + } + pd -= 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clz.h b/vendor/riscv-isa-sim/riscv/insns/clz.h new file mode 100644 index 00000000..e10e4d2d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz.h @@ -0,0 +1,5 @@ +require_either_extension(xlen == 32 ? EXT_ZBPBO : EXT_ZBB, EXT_ZBB); +reg_t x = xlen; +for (int i = 0; i < xlen; i++) + if (1 & (RS1 >> (xlen-i-1))) { x = i; break; } +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/clz16.h b/vendor/riscv-isa-sim/riscv/insns/clz16.h new file mode 100644 index 00000000..a129d59a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz16.h @@ -0,0 +1,10 @@ +P_ONE_LOOP(16, { + pd = 0; + if (ps1 == 0) pd = 16; + else { + if ((ps1 & 0xFF00) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x8000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clz32.h b/vendor/riscv-isa-sim/riscv/insns/clz32.h new file mode 100644 index 00000000..a38dda76 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz32.h @@ -0,0 +1,12 @@ +require_rv64; +P_ONE_LOOP(32, { + pd = 0; + if (ps1 == 0) pd = 32; + else { + if ((ps1 & 0xFFFF0000) == 0) { pd += 16; ps1 <<= 16; } + if ((ps1 & 0xFF000000) == 0) { pd += 8; ps1 <<= 8; } + if ((ps1 & 0xF0000000) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0000000) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80000000) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clz8.h b/vendor/riscv-isa-sim/riscv/insns/clz8.h new file mode 100644 index 00000000..78ff6b7b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clz8.h @@ -0,0 +1,9 @@ +P_ONE_LOOP(8, { + pd = 0; + if (ps1 == 0) pd = 8; + else { + if ((ps1 & 0xF0) == 0) { pd += 4; ps1 <<= 4; } + if ((ps1 & 0xC0) == 0) { pd += 2; ps1 <<= 2; } + if ((ps1 & 0x80) == 0) { pd += 1; } + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/clzw.h b/vendor/riscv-isa-sim/riscv/insns/clzw.h new file mode 100644 index 00000000..46816e77 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/clzw.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_ZBB); +reg_t x = 32; +for (int i = 0; i < 32; i++) + if (1 & (RS1 >> (31-i))) { x = i; break; } +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cmix.h b/vendor/riscv-isa-sim/riscv/insns/cmix.h new file mode 100644 index 00000000..98eb0bca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmix.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBPBO, EXT_XZBT); +WRITE_RD((RS1 & RS2) | (RS3 & ~RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cmov.h b/vendor/riscv-isa-sim/riscv/insns/cmov.h new file mode 100644 index 00000000..c7551bc6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmov.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBT); +WRITE_RD(RS2 ? RS1 : RS3); diff --git a/vendor/riscv-isa-sim/riscv/insns/cmpeq16.h b/vendor/riscv-isa-sim/riscv/insns/cmpeq16.h new file mode 100644 index 00000000..4fb6faab --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmpeq16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 == ps2) ? 
-1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/cmpeq8.h b/vendor/riscv-isa-sim/riscv/insns/cmpeq8.h new file mode 100644 index 00000000..fba1bf6d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cmpeq8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 == ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/cpop.h b/vendor/riscv-isa-sim/riscv/insns/cpop.h new file mode 100644 index 00000000..1f5c3ef8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cpop.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZBB); +reg_t x = 0; +for (int i = 0; i < xlen; i++) + if (1 & (RS1 >> i)) x++; +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cpopw.h b/vendor/riscv-isa-sim/riscv/insns/cpopw.h new file mode 100644 index 00000000..41383985 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cpopw.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_ZBB); +reg_t x = 0; +for (int i = 0; i < 32; i++) + if (1 & (RS1 >> i)) x++; +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/cras16.h b/vendor/riscv-isa-sim/riscv/insns/cras16.h new file mode 100644 index 00000000..6717e099 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cras16.h @@ -0,0 +1,5 @@ +P_CROSS_LOOP(16, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/cras32.h b/vendor/riscv-isa-sim/riscv/insns/cras32.h new file mode 100644 index 00000000..8f53e98b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/cras32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_LOOP(32, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_b.h b/vendor/riscv-isa-sim/riscv/insns/crc32_b.h new file mode 100644 index 00000000..3111fe57 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_b.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 8; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_d.h b/vendor/riscv-isa-sim/riscv/insns/crc32_d.h new file mode 100644 index 00000000..7fd7a38f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_d.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 64; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_h.h b/vendor/riscv-isa-sim/riscv/insns/crc32_h.h new file mode 100644 index 00000000..5063fefd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 16; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32_w.h b/vendor/riscv-isa-sim/riscv/insns/crc32_w.h new file mode 100644 index 00000000..6e425ab8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32_w.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 32; i++) + x = (x >> 1) ^ (0xEDB88320 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32c_b.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_b.h new file mode 100644 index 00000000..d11b0dda --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_b.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 8; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/crc32c_d.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_d.h new file mode 100644 index 00000000..81175fd9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_d.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 64; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32c_h.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_h.h new file mode 100644 index 00000000..ef5817d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 16; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crc32c_w.h b/vendor/riscv-isa-sim/riscv/insns/crc32c_w.h new file mode 100644 index 00000000..87935402 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crc32c_w.h @@ -0,0 +1,5 @@ +require_extension(EXT_XZBR); +reg_t x = zext_xlen(RS1); +for (int i = 0; i < 32; i++) + x = (x >> 1) ^ (0x82F63B78 & ~((x&1)-1)); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/crsa16.h b/vendor/riscv-isa-sim/riscv/insns/crsa16.h new file mode 100644 index 00000000..2c1997ac --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crsa16.h @@ -0,0 +1,5 @@ +P_CROSS_LOOP(16, { + pd = ps1 - ps2; +}, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/crsa32.h b/vendor/riscv-isa-sim/riscv/insns/crsa32.h new file mode 100644 index 00000000..4290e9ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/crsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_LOOP(32, { + pd = (int64_t)ps1 - ps2; +}, { + pd = (int64_t)ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrc.h b/vendor/riscv-isa-sim/riscv/insns/csrrc.h new file mode 100644 index 00000000..019a9ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrc.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old & ~RS1); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrci.h b/vendor/riscv-isa-sim/riscv/insns/csrrci.h new file mode 100644 index 00000000..f02d3268 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrci.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old & ~(reg_t)insn.rs1()); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrs.h b/vendor/riscv-isa-sim/riscv/insns/csrrs.h new file mode 100644 index 00000000..7632d1f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrs.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old | RS1); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrsi.h b/vendor/riscv-isa-sim/riscv/insns/csrrsi.h new file mode 100644 index 00000000..9acfcfcf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrsi.h @@ -0,0 +1,8 @@ +bool write = insn.rs1() != 0; +int csr = validate_csr(insn.csr(), write); +reg_t old = p->get_csr(csr, insn, write); +if (write) { + p->put_csr(csr, old | insn.rs1()); +} +WRITE_RD(sext_xlen(old)); +serialize(); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/csrrw.h b/vendor/riscv-isa-sim/riscv/insns/csrrw.h new file mode 100644 index 00000000..e4c605bd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrw.h @@ -0,0 +1,5 @@ +int csr = validate_csr(insn.csr(), true); +reg_t old = p->get_csr(csr, insn, true); +p->put_csr(csr, RS1); +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/csrrwi.h b/vendor/riscv-isa-sim/riscv/insns/csrrwi.h new file mode 100644 index 00000000..77fec154 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/csrrwi.h @@ -0,0 +1,5 @@ +int csr = validate_csr(insn.csr(), true); +reg_t old = p->get_csr(csr, insn, true); +p->put_csr(csr, insn.rs1()); +WRITE_RD(sext_xlen(old)); +serialize(); diff --git a/vendor/riscv-isa-sim/riscv/insns/ctz.h b/vendor/riscv-isa-sim/riscv/insns/ctz.h new file mode 100644 index 00000000..25d37239 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ctz.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZBB); +reg_t x = xlen; +for (int i = 0; i < xlen; i++) + if (1 & (RS1 >> i)) { x = i; break; } +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ctzw.h b/vendor/riscv-isa-sim/riscv/insns/ctzw.h new file mode 100644 index 00000000..aca46e9d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ctzw.h @@ -0,0 +1,6 @@ +require_rv64; +require_extension(EXT_ZBB); +reg_t x = 32; +for (int i = 0; i < 32; i++) + if (1 & (RS1 >> i)) { x = i; break; } +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/div.h b/vendor/riscv-isa-sim/riscv/insns/div.h new file mode 100644 index 00000000..9cbe8d6b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/div.h @@ -0,0 +1,9 @@ +require_extension('M'); +sreg_t lhs = sext_xlen(RS1); +sreg_t rhs = sext_xlen(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else if(lhs == INT64_MIN && rhs == -1) + WRITE_RD(lhs); +else + WRITE_RD(sext_xlen(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/divu.h b/vendor/riscv-isa-sim/riscv/insns/divu.h new file mode 100644 index 00000000..31d75856 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/divu.h @@ -0,0 +1,7 @@ +require_extension('M'); +reg_t lhs = zext_xlen(RS1); +reg_t rhs = zext_xlen(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else + WRITE_RD(sext_xlen(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/divuw.h b/vendor/riscv-isa-sim/riscv/insns/divuw.h new file mode 100644 index 00000000..e127619a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/divuw.h @@ -0,0 +1,8 @@ +require_extension('M'); +require_rv64; +reg_t lhs = zext32(RS1); +reg_t rhs = zext32(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else + WRITE_RD(sext32(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/divw.h b/vendor/riscv-isa-sim/riscv/insns/divw.h new file mode 100644 index 00000000..11be17e4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/divw.h @@ -0,0 +1,8 @@ +require_extension('M'); +require_rv64; +sreg_t lhs = sext32(RS1); +sreg_t rhs = sext32(RS2); +if(rhs == 0) + WRITE_RD(UINT64_MAX); +else + WRITE_RD(sext32(lhs / rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/dret.h b/vendor/riscv-isa-sim/riscv/insns/dret.h new file mode 100644 index 00000000..01a39923 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/dret.h @@ -0,0 +1,9 @@ +require(STATE.debug_mode); +set_pc_and_serialize(STATE.dpc->read()); +p->set_privilege(STATE.dcsr->prv); + +/* We're not in Debug Mode anymore. 
*/ +STATE.debug_mode = false; + +if (STATE.dcsr->step) + STATE.single_step = STATE.STEP_STEPPING; diff --git a/vendor/riscv-isa-sim/riscv/insns/ebreak.h b/vendor/riscv-isa-sim/riscv/insns/ebreak.h new file mode 100644 index 00000000..9f3d44d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ebreak.h @@ -0,0 +1 @@ +throw trap_breakpoint(STATE.v, pc); diff --git a/vendor/riscv-isa-sim/riscv/insns/ecall.h b/vendor/riscv-isa-sim/riscv/insns/ecall.h new file mode 100644 index 00000000..e6c723f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ecall.h @@ -0,0 +1,11 @@ +switch (STATE.prv) +{ + case PRV_U: throw trap_user_ecall(); + case PRV_S: + if (STATE.v) + throw trap_virtual_supervisor_ecall(); + else + throw trap_supervisor_ecall(); + case PRV_M: throw trap_machine_ecall(); + default: abort(); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_d.h b/vendor/riscv-isa-sim/riscv/insns/fadd_d.h new file mode 100644 index 00000000..4a436e24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_add(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_h.h b/vendor/riscv-isa-sim/riscv/insns/fadd_h.h new file mode 100644 index 00000000..2b646ae7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_add(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_q.h b/vendor/riscv-isa-sim/riscv/insns/fadd_q.h new file mode 100644 index 00000000..1139a74d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_add(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fadd_s.h b/vendor/riscv-isa-sim/riscv/insns/fadd_s.h new file mode 100644 index 00000000..cc18d58c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fadd_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_add(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_d.h b/vendor/riscv-isa-sim/riscv/insns/fclass_d.h new file mode 100644 index 00000000..9456123d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_classify(f64(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_h.h b/vendor/riscv-isa-sim/riscv/insns/fclass_h.h new file mode 100644 index 00000000..066a2d24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_classify(f16(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_q.h b/vendor/riscv-isa-sim/riscv/insns/fclass_q.h new file mode 100644 index 00000000..53307582 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_classify(f128(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fclass_s.h b/vendor/riscv-isa-sim/riscv/insns/fclass_s.h new file mode 100644 index 00000000..a392db88 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fclass_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_classify(f32(FRS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h 
b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h new file mode 100644 index 00000000..04e9ff4e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_to_f64(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h new file mode 100644 index 00000000..08716cff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_l.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f64(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h new file mode 100644 index 00000000..306d7fed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_lu.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui64_to_f64(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h new file mode 100644 index 00000000..b50a43d0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_to_f64(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h new file mode 100644 index 00000000..5f805b06 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_s.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_to_f64(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h new file mode 100644 index 00000000..4c4861c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_w.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f64((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h new file mode 100644 index 00000000..1dbf218a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_d_wu.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f64((uint32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h new file mode 100644 index 00000000..e9987b7f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_d.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_to_f16(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h new file mode 100644 index 00000000..39178c2f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_l.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f16(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h new file mode 100644 index 00000000..a872c480 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_lu.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; 
+WRITE_FRD(ui64_to_f16(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h new file mode 100644 index 00000000..4dfdd536 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_q.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_to_f16(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h new file mode 100644 index 00000000..ce39d814 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_s.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFHMIN); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_to_f16(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h new file mode 100644 index 00000000..c0824545 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_w.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f16((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h new file mode 100644 index 00000000..9f2f5f6a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_h_wu.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f16((uint32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h new file mode 100644 index 00000000..c09e6c44 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_d.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f64_to_i64(f64(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h new file mode 100644 index 00000000..5a1fea85 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f16_to_i64(f16(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h new file mode 100644 index 00000000..b28bca23 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_q.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f128_to_i64(f128(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h new file mode 100644 index 00000000..267e0eb8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_l_s.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f32_to_i64(f32(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h new file mode 100644 index 00000000..3a021204 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_d.h @@ -0,0 +1,6 @@ +require_extension('D'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f64_to_ui64(f64(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h new file mode 100644 index 
00000000..f1454c3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFH); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f16_to_ui64(f16(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h new file mode 100644 index 00000000..8c5be7c6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_q.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f128_to_ui64(f128(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h new file mode 100644 index 00000000..94115a3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_lu_s.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(f32_to_ui64(f32(FRS1), RM, true)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h new file mode 100644 index 00000000..c2437b12 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_d.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_to_f128(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h new file mode 100644 index 00000000..8bf16ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_h.h @@ -0,0 +1,6 @@ +require_extension(EXT_ZFHMIN); +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_to_f128(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h new file mode 100644 index 00000000..f1f45ca3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_l.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f128(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h new file mode 100644 index 00000000..850212e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_lu.h @@ -0,0 +1,6 @@ +require_extension('Q'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui64_to_f128(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h new file mode 100644 index 00000000..79e6bb6f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_s.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_to_f128(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h new file mode 100644 index 00000000..fb83f15d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_w.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f128((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h new file mode 100644 index 00000000..7c2ae97e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_q_wu.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f128((uint32_t)RS1)); +set_fp_exceptions; diff 
--git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h new file mode 100644 index 00000000..40333359 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_to_f32(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h new file mode 100644 index 00000000..22cdd728 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFHMIN); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_to_f32(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h new file mode 100644 index 00000000..9abcc805 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_l.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i64_to_f32(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h new file mode 100644 index 00000000..70c676ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_lu.h @@ -0,0 +1,6 @@ +require_extension('F'); +require_rv64; +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui64_to_f32(RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h new file mode 100644 index 00000000..b0f118ec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_to_f32(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h new file mode 100644 index 00000000..1ddabd87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_w.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(i32_to_f32((int32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h new file mode 100644 index 00000000..c1394c3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_s_wu.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(ui32_to_f32((uint32_t)RS1)); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h new file mode 100644 index 00000000..28eb2456 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f64_to_i32(f64(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h new file mode 100644 index 00000000..fe8bb48f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f16_to_i32(f16(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h new file mode 100644 index 00000000..e10bafc9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; 
+WRITE_RD(sext32(f128_to_i32(f128(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h new file mode 100644 index 00000000..d30f1b44 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_w_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f32_to_i32(f32(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h new file mode 100644 index 00000000..5cdc004c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f64_to_ui32(f64(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h new file mode 100644 index 00000000..bf6648d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f16_to_ui32(f16(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h new file mode 100644 index 00000000..c391dc87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f128_to_ui32(f128(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h new file mode 100644 index 00000000..034d6816 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fcvt_wu_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_RD(sext32(f32_to_ui32(f32(FRS1), RM, true))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_d.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_d.h new file mode 100644 index 00000000..ae7911ae --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_div(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_h.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_h.h new file mode 100644 index 00000000..a169eae8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_div(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_q.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_q.h new file mode 100644 index 00000000..22048317 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_div(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fdiv_s.h b/vendor/riscv-isa-sim/riscv/insns/fdiv_s.h new file mode 100644 index 00000000..c74ff041 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fdiv_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_div(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fence.h b/vendor/riscv-isa-sim/riscv/insns/fence.h new file mode 100644 index 00000000..e69de29b diff --git 
a/vendor/riscv-isa-sim/riscv/insns/fence_i.h b/vendor/riscv-isa-sim/riscv/insns/fence_i.h new file mode 100644 index 00000000..38dcaf3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fence_i.h @@ -0,0 +1 @@ +MMU.flush_icache(); diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_d.h b/vendor/riscv-isa-sim/riscv/insns/feq_d.h new file mode 100644 index 00000000..541ed5bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_eq(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_h.h b/vendor/riscv-isa-sim/riscv/insns/feq_h.h new file mode 100644 index 00000000..47e75a5b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_eq(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_q.h b/vendor/riscv-isa-sim/riscv/insns/feq_q.h new file mode 100644 index 00000000..cee2da95 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_q.h @@ -0,0 +1,4 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_eq(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/feq_s.h b/vendor/riscv-isa-sim/riscv/insns/feq_s.h new file mode 100644 index 00000000..489bea69 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/feq_s.h @@ -0,0 +1,4 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_eq(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fld.h b/vendor/riscv-isa-sim/riscv/insns/fld.h new file mode 100644 index 00000000..4dea1d47 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fld.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(f64(MMU.load_uint64(RS1 + insn.i_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_d.h b/vendor/riscv-isa-sim/riscv/insns/fle_d.h new file mode 100644 index 00000000..419a36fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_le(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_h.h b/vendor/riscv-isa-sim/riscv/insns/fle_h.h new file mode 100644 index 00000000..9fc59685 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_le(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_q.h b/vendor/riscv-isa-sim/riscv/insns/fle_q.h new file mode 100644 index 00000000..8368af9d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_q.h @@ -0,0 +1,4 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_le(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fle_s.h b/vendor/riscv-isa-sim/riscv/insns/fle_s.h new file mode 100644 index 00000000..5c0124ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fle_s.h @@ -0,0 +1,4 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_le(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flh.h b/vendor/riscv-isa-sim/riscv/insns/flh.h new file mode 100644 index 00000000..bdb22d3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flh.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +WRITE_FRD(f16(MMU.load_uint16(RS1 + insn.i_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/flq.h b/vendor/riscv-isa-sim/riscv/insns/flq.h new file mode 100644 index 
00000000..81d225cd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flq.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(MMU.load_float128(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_d.h b/vendor/riscv-isa-sim/riscv/insns/flt_d.h new file mode 100644 index 00000000..7176a961 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_fp; +WRITE_RD(f64_lt(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_h.h b/vendor/riscv-isa-sim/riscv/insns/flt_h.h new file mode 100644 index 00000000..f516a38a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_RD(f16_lt(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_q.h b/vendor/riscv-isa-sim/riscv/insns/flt_q.h new file mode 100644 index 00000000..c4521418 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_q.h @@ -0,0 +1,4 @@ +require_extension('Q'); +require_fp; +WRITE_RD(f128_lt(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flt_s.h b/vendor/riscv-isa-sim/riscv/insns/flt_s.h new file mode 100644 index 00000000..40acc34b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flt_s.h @@ -0,0 +1,4 @@ +require_extension('F'); +require_fp; +WRITE_RD(f32_lt(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/flw.h b/vendor/riscv-isa-sim/riscv/insns/flw.h new file mode 100644 index 00000000..61297544 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/flw.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(f32(MMU.load_uint32(RS1 + insn.i_imm()))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_d.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_d.h new file mode 100644 index 00000000..ab22bebb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_h.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_h.h new file mode 100644 index 00000000..6551de5e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_q.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_q.h new file mode 100644 index 00000000..882dfc1d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128(FRS1), f128(FRS2), f128(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmadd_s.h b/vendor/riscv-isa-sim/riscv/insns/fmadd_s.h new file mode 100644 index 00000000..e919190c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmadd_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_d.h b/vendor/riscv-isa-sim/riscv/insns/fmax_d.h new file mode 100644 index 00000000..11491f54 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_d.h @@ -0,0 +1,9 @@ +require_extension('D'); +require_fp; +bool greater = 
f64_lt_quiet(f64(FRS2), f64(FRS1)) || + (f64_eq(f64(FRS2), f64(FRS1)) && (f64(FRS2).v & F64_SIGN)); +if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v)) + WRITE_FRD(f64(defaultNaNF64UI)); +else + WRITE_FRD(greater || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_h.h b/vendor/riscv-isa-sim/riscv/insns/fmax_h.h new file mode 100644 index 00000000..3d4c40eb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(f16_max(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_q.h b/vendor/riscv-isa-sim/riscv/insns/fmax_q.h new file mode 100644 index 00000000..7dd7884a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_q.h @@ -0,0 +1,9 @@ +require_extension('Q'); +require_fp; +bool greater = f128_lt_quiet(f128(FRS2), f128(FRS1)) || + (f128_eq(f128(FRS2), f128(FRS1)) && (f128(FRS2).v[1] & F64_SIGN)); +if (isNaNF128(f128(FRS1)) && isNaNF128(f128(FRS2))) + WRITE_FRD(f128(defaultNaNF128())); +else + WRITE_FRD(greater || isNaNF128(f128(FRS2)) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmax_s.h b/vendor/riscv-isa-sim/riscv/insns/fmax_s.h new file mode 100644 index 00000000..41d8f921 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmax_s.h @@ -0,0 +1,9 @@ +require_extension('F'); +require_fp; +bool greater = f32_lt_quiet(f32(FRS2), f32(FRS1)) || + (f32_eq(f32(FRS2), f32(FRS1)) && (f32(FRS2).v & F32_SIGN)); +if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v)) + WRITE_FRD(f32(defaultNaNF32UI)); +else + WRITE_FRD(greater || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_d.h b/vendor/riscv-isa-sim/riscv/insns/fmin_d.h new file mode 100644 index 00000000..5cf349d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_d.h @@ -0,0 +1,9 @@ +require_extension('D'); +require_fp; +bool less = f64_lt_quiet(f64(FRS1), f64(FRS2)) || + (f64_eq(f64(FRS1), f64(FRS2)) && (f64(FRS1).v & F64_SIGN)); +if (isNaNF64UI(f64(FRS1).v) && isNaNF64UI(f64(FRS2).v)) + WRITE_FRD(f64(defaultNaNF64UI)); +else + WRITE_FRD(less || isNaNF64UI(f64(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_h.h b/vendor/riscv-isa-sim/riscv/insns/fmin_h.h new file mode 100644 index 00000000..5fb1404f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_h.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(f16_min(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_q.h b/vendor/riscv-isa-sim/riscv/insns/fmin_q.h new file mode 100644 index 00000000..fcb9526e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_q.h @@ -0,0 +1,9 @@ +require_extension('Q'); +require_fp; +bool less = f128_lt_quiet(f128(FRS1), f128(FRS2)) || + (f128_eq(f128(FRS1), f128(FRS2)) && (f128(FRS1).v[1] & F64_SIGN)); +if (isNaNF128(f128(FRS1)) && isNaNF128(f128(FRS2))) + WRITE_FRD(f128(defaultNaNF128())); +else + WRITE_FRD(less || isNaNF128(f128(FRS2)) ? 
FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmin_s.h b/vendor/riscv-isa-sim/riscv/insns/fmin_s.h new file mode 100644 index 00000000..19e11938 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmin_s.h @@ -0,0 +1,9 @@ +require_extension('F'); +require_fp; +bool less = f32_lt_quiet(f32(FRS1), f32(FRS2)) || + (f32_eq(f32(FRS1), f32(FRS2)) && (f32(FRS1).v & F32_SIGN)); +if (isNaNF32UI(f32(FRS1).v) && isNaNF32UI(f32(FRS2).v)) + WRITE_FRD(f32(defaultNaNF32UI)); +else + WRITE_FRD(less || isNaNF32UI(f32(FRS2).v) ? FRS1 : FRS2); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_d.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_d.h new file mode 100644 index 00000000..5b5bc0f7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(FRS1), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_h.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_h.h new file mode 100644 index 00000000..934291fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(FRS1), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_q.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_q.h new file mode 100644 index 00000000..1bb96c27 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128(FRS1), f128(FRS2), f128_negate(f128(FRS3)))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmsub_s.h b/vendor/riscv-isa-sim/riscv/insns/fmsub_s.h new file mode 100644 index 00000000..d46c887e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmsub_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(FRS1), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_d.h b/vendor/riscv-isa-sim/riscv/insns/fmul_d.h new file mode 100644 index 00000000..9189d8d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mul(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_h.h b/vendor/riscv-isa-sim/riscv/insns/fmul_h.h new file mode 100644 index 00000000..0152df8f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mul(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_q.h b/vendor/riscv-isa-sim/riscv/insns/fmul_q.h new file mode 100644 index 00000000..66f5a05c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mul(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmul_s.h b/vendor/riscv-isa-sim/riscv/insns/fmul_s.h new file mode 100644 index 00000000..145d5ce4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmul_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mul(f32(FRS1), f32(FRS2))); 
+set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h b/vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h new file mode 100644 index 00000000..0bff5fb7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_d_x.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_rv64; +require_fp; +WRITE_FRD(f64(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h b/vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h new file mode 100644 index 00000000..e55d607b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_h_x.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +WRITE_FRD(f16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h b/vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h new file mode 100644 index 00000000..5f713231 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_w_x.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(f32(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h b/vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h new file mode 100644 index 00000000..e1a23f48 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_x_d.h @@ -0,0 +1,4 @@ +require_extension('D'); +require_rv64; +require_fp; +WRITE_RD(FRS1.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h b/vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h new file mode 100644 index 00000000..7a2e5ff6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_x_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +WRITE_RD(sext32((int16_t)(FRS1.v[0]))); diff --git a/vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h b/vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h new file mode 100644 index 00000000..6754f869 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fmv_x_w.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_RD(sext32(FRS1.v[0])); diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h new file mode 100644 index 00000000..e8dd7432 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(f64(FRS3).v ^ F64_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h new file mode 100644 index 00000000..e4c619e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(f16(FRS3).v ^ F16_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h new file mode 100644 index 00000000..a36ce188 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128_negate(f128(FRS1)), f128(FRS2), f128_negate(f128(FRS3)))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h b/vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h new file mode 100644 index 00000000..1c2996e3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmadd_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(f32(FRS3).v ^ F32_SIGN))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h new 
file mode 100644 index 00000000..c29a0b93 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_mulAdd(f64(f64(FRS1).v ^ F64_SIGN), f64(FRS2), f64(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h new file mode 100644 index 00000000..0410c3bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_mulAdd(f16(f16(FRS1).v ^ F16_SIGN), f16(FRS2), f16(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h new file mode 100644 index 00000000..130b4ce3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_mulAdd(f128_negate(f128(FRS1)), f128(FRS2), f128(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h b/vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h new file mode 100644 index 00000000..4c61fc7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fnmsub_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_mulAdd(f32(f32(FRS1).v ^ F32_SIGN), f32(FRS2), f32(FRS3))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsd.h b/vendor/riscv-isa-sim/riscv/insns/fsd.h new file mode 100644 index 00000000..38c702b7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsd.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +MMU.store_uint64(RS1 + insn.s_imm(), FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h new file mode 100644 index 00000000..78f9ce78 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(fsgnj64(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h new file mode 100644 index 00000000..79d50f5f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(fsgnj16(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h new file mode 100644 index 00000000..0b9a2708 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(fsgnj128(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h b/vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h new file mode 100644 index 00000000..c1a70cb7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnj_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(fsgnj32(FRS1, FRS2, false, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h new file mode 100644 index 00000000..f02c3116 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(fsgnj64(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h new file mode 100644 index 00000000..ebb4ac9f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_h.h @@ -0,0 +1,3 
@@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(fsgnj16(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h new file mode 100644 index 00000000..38c7bbff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(fsgnj128(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h new file mode 100644 index 00000000..35906d65 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjn_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(fsgnj32(FRS1, FRS2, true, false)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h new file mode 100644 index 00000000..c1217371 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_d.h @@ -0,0 +1,3 @@ +require_extension('D'); +require_fp; +WRITE_FRD(fsgnj64(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h new file mode 100644 index 00000000..93102695 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_h.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFH); +require_fp; +WRITE_FRD(fsgnj16(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h new file mode 100644 index 00000000..fc86d26d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_q.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +WRITE_FRD(fsgnj128(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h new file mode 100644 index 00000000..4d5c624b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsgnjx_s.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +WRITE_FRD(fsgnj32(FRS1, FRS2, false, true)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsh.h b/vendor/riscv-isa-sim/riscv/insns/fsh.h new file mode 100644 index 00000000..9eaae1eb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsh.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZFHMIN); +require_fp; +MMU.store_uint16(RS1 + insn.s_imm(), FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsl.h b/vendor/riscv-isa-sim/riscv/insns/fsl.h new file mode 100644 index 00000000..53a21608 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsl.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBT); +int shamt = RS2 & (2*xlen-1); +reg_t a = RS1, b = RS3; +if (shamt >= xlen) { + a = RS3, b = RS1; + shamt -= xlen; +} +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen(shamt ? (a << shamt) | (zext_xlen(b) >> rshamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fslw.h b/vendor/riscv-isa-sim/riscv/insns/fslw.h new file mode 100644 index 00000000..83940105 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fslw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBT); +int shamt = RS2 & 63; +reg_t a = RS1, b = RS3; +if (shamt >= 32) { + a = RS3, b = RS1; + shamt -= 32; +} +int rshamt = -shamt & 31; +WRITE_RD(sext32(shamt ? 
(a << shamt) | (zext32(b) >> rshamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsq.h b/vendor/riscv-isa-sim/riscv/insns/fsq.h new file mode 100644 index 00000000..610960e5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsq.h @@ -0,0 +1,3 @@ +require_extension('Q'); +require_fp; +MMU.store_float128(RS1 + insn.s_imm(), FRS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h new file mode 100644 index 00000000..da138ba1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_sqrt(f64(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h new file mode 100644 index 00000000..138d5727 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_sqrt(f16(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h new file mode 100644 index 00000000..6cb6ba31 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_sqrt(f128(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h b/vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h new file mode 100644 index 00000000..74768466 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsqrt_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_sqrt(f32(FRS1))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsr.h b/vendor/riscv-isa-sim/riscv/insns/fsr.h new file mode 100644 index 00000000..dfb26f11 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsr.h @@ -0,0 +1,9 @@ +require_either_extension(xlen == 32 ? EXT_ZBPBO : EXT_XZBT, EXT_XZBT); +int shamt = RS2 & (2*xlen-1); +reg_t a = RS1, b = RS3; +if (shamt >= xlen) { + a = RS3, b = RS1; + shamt -= xlen; +} +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen(shamt ? (b << rshamt) | (zext_xlen(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsri.h b/vendor/riscv-isa-sim/riscv/insns/fsri.h new file mode 100644 index 00000000..f7186f1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsri.h @@ -0,0 +1,9 @@ +require_either_extension(xlen == 32 ? EXT_ZBPBO : EXT_XZBT, EXT_XZBT); +int shamt = SHAMT & (2*xlen-1); +reg_t a = RS1, b = RS3; +if (shamt >= xlen) { + a = RS3, b = RS1; + shamt -= xlen; +} +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen(shamt ? (b << rshamt) | (zext_xlen(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsriw.h b/vendor/riscv-isa-sim/riscv/insns/fsriw.h new file mode 100644 index 00000000..7956de7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsriw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBT); +int shamt = SHAMT & 63; +reg_t a = RS1, b = RS3; +if (shamt >= 32) { + a = RS3, b = RS1; + shamt -= 32; +} +int rshamt = -shamt & 31; +WRITE_RD(sext32(shamt ? 
(b << rshamt) | (zext32(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsrw.h b/vendor/riscv-isa-sim/riscv/insns/fsrw.h new file mode 100644 index 00000000..494fe260 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsrw.h @@ -0,0 +1,10 @@ +require_rv64; +require_either_extension(EXT_ZBPBO, EXT_XZBT); +int shamt = RS2 & 63; +reg_t a = RS1, b = RS3; +if (shamt >= 32) { + a = RS3, b = RS1; + shamt -= 32; +} +int rshamt = -shamt & 31; +WRITE_RD(sext32(shamt ? (b << rshamt) | (zext32(a) >> shamt) : a)); diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_d.h b/vendor/riscv-isa-sim/riscv/insns/fsub_d.h new file mode 100644 index 00000000..1418a063 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_d.h @@ -0,0 +1,5 @@ +require_extension('D'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f64_sub(f64(FRS1), f64(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_h.h b/vendor/riscv-isa-sim/riscv/insns/fsub_h.h new file mode 100644 index 00000000..43b51cc2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_h.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZFH); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f16_sub(f16(FRS1), f16(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_q.h b/vendor/riscv-isa-sim/riscv/insns/fsub_q.h new file mode 100644 index 00000000..e050e3aa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_q.h @@ -0,0 +1,5 @@ +require_extension('Q'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f128_sub(f128(FRS1), f128(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsub_s.h b/vendor/riscv-isa-sim/riscv/insns/fsub_s.h new file mode 100644 index 00000000..f6183ea0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsub_s.h @@ -0,0 +1,5 @@ +require_extension('F'); +require_fp; +softfloat_roundingMode = RM; +WRITE_FRD(f32_sub(f32(FRS1), f32(FRS2))); +set_fp_exceptions; diff --git a/vendor/riscv-isa-sim/riscv/insns/fsw.h b/vendor/riscv-isa-sim/riscv/insns/fsw.h new file mode 100644 index 00000000..8af51845 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/fsw.h @@ -0,0 +1,3 @@ +require_extension('F'); +require_fp; +MMU.store_uint32(RS1 + insn.s_imm(), FRS2.v[0]); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorc.h b/vendor/riscv-isa-sim/riscv/insns/gorc.h new file mode 100644 index 00000000..ffe44134 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorc.h @@ -0,0 +1,10 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & (xlen-1); +if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x |= ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorci.h b/vendor/riscv-isa-sim/riscv/insns/gorci.h new file mode 100644 index 00000000..d3017f49 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorci.h @@ -0,0 +1,13 @@ +// Zbb contains orc.b but not general gorci +require(((SHAMT == 7) && p->extension_enabled(EXT_ZBB)) + || p->extension_enabled(EXT_XZBP)); +require(SHAMT < xlen); +reg_t x = RS1; +int shamt = SHAMT; +if 
(shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x |= ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorciw.h b/vendor/riscv-isa-sim/riscv/insns/gorciw.h new file mode 100644 index 00000000..44ade807 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorciw.h @@ -0,0 +1,11 @@ +require_rv64; +require_extension(EXT_XZBP); +require(SHAMT < 32); +reg_t x = RS1; +int shamt = SHAMT; +if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/gorcw.h b/vendor/riscv-isa-sim/riscv/insns/gorcw.h new file mode 100644 index 00000000..611b3caa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/gorcw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 31; +if (shamt & 1) x |= ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x |= ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x |= ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x |= ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x |= ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/grev.h b/vendor/riscv-isa-sim/riscv/insns/grev.h new file mode 100644 index 00000000..7181b3cd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/grev.h @@ -0,0 +1,10 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & (xlen-1); +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/grevi.h b/vendor/riscv-isa-sim/riscv/insns/grevi.h new file mode 100644 index 00000000..d4718145 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/grevi.h @@ -0,0 +1,17 @@ +// Zbb contains rev8 but not general grevi +// Zbkb contains rev8 and brev8 (a.k.a. 
rev.b) but not general grevi +int shamt = SHAMT; +require(((shamt == xlen - 8) && (p->extension_enabled(EXT_ZBB) || p->extension_enabled(EXT_ZBKB))) //rev8 + || ((shamt == 7) && p->extension_enabled(EXT_ZBKB)) // rev8.b + || ((shamt == 8) && p->extension_enabled(EXT_ZPN)) // rev8.h + || ((shamt == xlen - 1) && p->extension_enabled(EXT_ZPN)) // rev + || p->extension_enabled(EXT_XZBP)); +require(shamt < xlen); +reg_t x = RS1; +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +if (shamt & 32) x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/greviw.h b/vendor/riscv-isa-sim/riscv/insns/greviw.h new file mode 100644 index 00000000..004ecf34 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/greviw.h @@ -0,0 +1,11 @@ +require_rv64; +require_extension(EXT_XZBP); +require(SHAMT < 32); +reg_t x = RS1; +int shamt = SHAMT; +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/grevw.h b/vendor/riscv-isa-sim/riscv/insns/grevw.h new file mode 100644 index 00000000..3fbcf228 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/grevw.h @@ -0,0 +1,10 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 31; +if (shamt & 1) x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1); +if (shamt & 2) x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2); +if (shamt & 4) x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4); +if (shamt & 8) x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8); +if (shamt & 16) x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h b/vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h new file mode 100644 index 00000000..b3ddf1e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hfence_gvma.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.mstatus->read(), MSTATUS_TVM) ? 
PRV_M : PRV_S); +MMU.flush_tlb(); diff --git a/vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h b/vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h new file mode 100644 index 00000000..ecd42c19 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hfence_vvma.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(PRV_S); +MMU.flush_tlb(); diff --git a/vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h b/vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h new file mode 100644 index 00000000..6be5cd94 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hinval_gvma.h @@ -0,0 +1,2 @@ +require_extension(EXT_SVINVAL); +#include "hfence_gvma.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h b/vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h new file mode 100644 index 00000000..c50707c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hinval_vvma.h @@ -0,0 +1,2 @@ +require_extension(EXT_SVINVAL); +#include "hfence_vvma.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_b.h b/vendor/riscv-isa-sim/riscv/insns/hlv_b.h new file mode 100644 index 00000000..2ccb0463 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_b.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int8(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_bu.h b/vendor/riscv-isa-sim/riscv/insns/hlv_bu.h new file mode 100644 index 00000000..560f94af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_bu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_uint8(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_d.h b/vendor/riscv-isa-sim/riscv/insns/hlv_d.h new file mode 100644 index 00000000..f432b650 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_d.h @@ -0,0 +1,5 @@ +require_extension('H'); +require_rv64; +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int64(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_h.h b/vendor/riscv-isa-sim/riscv/insns/hlv_h.h new file mode 100644 index 00000000..4cb07e99 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_h.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_hu.h b/vendor/riscv-isa-sim/riscv/insns/hlv_hu.h new file mode 100644 index 00000000..adec2f0b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_hu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_uint16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_w.h b/vendor/riscv-isa-sim/riscv/insns/hlv_w.h new file mode 100644 index 00000000..b2e102f0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_w.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? 
PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_int32(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlv_wu.h b/vendor/riscv-isa-sim/riscv/insns/hlv_wu.h new file mode 100644 index 00000000..1f921c0f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlv_wu.h @@ -0,0 +1,5 @@ +require_extension('H'); +require_rv64; +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_uint32(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h b/vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h new file mode 100644 index 00000000..3eb699c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlvx_hu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(MMU.guest_load_x_uint16(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h b/vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h new file mode 100644 index 00000000..33e2fa1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hlvx_wu.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +WRITE_RD(sext_xlen(MMU.guest_load_x_uint32(RS1))); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_b.h b/vendor/riscv-isa-sim/riscv/insns/hsv_b.h new file mode 100644 index 00000000..15f6a268 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_b.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +MMU.guest_store_uint8(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_d.h b/vendor/riscv-isa-sim/riscv/insns/hsv_d.h new file mode 100644 index 00000000..83c3376e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_d.h @@ -0,0 +1,5 @@ +require_extension('H'); +require_rv64; +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +MMU.guest_store_uint64(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_h.h b/vendor/riscv-isa-sim/riscv/insns/hsv_h.h new file mode 100644 index 00000000..eaa2a2cb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_h.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? PRV_U : PRV_S); +MMU.guest_store_uint16(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/hsv_w.h b/vendor/riscv-isa-sim/riscv/insns/hsv_w.h new file mode 100644 index 00000000..0d2c3d4d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/hsv_w.h @@ -0,0 +1,4 @@ +require_extension('H'); +require_novirt(); +require_privilege(get_field(STATE.hstatus->read(), HSTATUS_HU) ? 
PRV_U : PRV_S); +MMU.guest_store_uint32(RS1, RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/insb.h b/vendor/riscv-isa-sim/riscv/insns/insb.h new file mode 100644 index 00000000..020e9051 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/insb.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +reg_t bpos = insn.p_imm3(); +require(bpos < (unsigned long)xlen/8); // imm[2] == 1 is illegal on rv32 +WRITE_RD(sext_xlen(set_field(RD, make_mask64(bpos * 8, 8), P_B(RS1, 0)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/jal.h b/vendor/riscv-isa-sim/riscv/insns/jal.h new file mode 100644 index 00000000..cd599641 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/jal.h @@ -0,0 +1,3 @@ +reg_t tmp = npc; +set_pc(JUMP_TARGET); +WRITE_RD(tmp); diff --git a/vendor/riscv-isa-sim/riscv/insns/jalr.h b/vendor/riscv-isa-sim/riscv/insns/jalr.h new file mode 100644 index 00000000..386e8db1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/jalr.h @@ -0,0 +1,3 @@ +reg_t tmp = npc; +set_pc((RS1 + insn.i_imm()) & ~reg_t(1)); +WRITE_RD(tmp); diff --git a/vendor/riscv-isa-sim/riscv/insns/kabs16.h b/vendor/riscv-isa-sim/riscv/insns/kabs16.h new file mode 100644 index 00000000..8d1d9b83 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabs16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_ONE_LOOP(16, { + pd = ps1; + if (ps1 == INT16_MIN) { + pd = INT16_MAX; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = - ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kabs32.h b/vendor/riscv-isa-sim/riscv/insns/kabs32.h new file mode 100644 index 00000000..0536aaca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabs32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_ONE_LOOP(32, { + pd = ps1; + if (ps1 == INT32_MIN) { + pd = INT32_MAX; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = - ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kabs8.h b/vendor/riscv-isa-sim/riscv/insns/kabs8.h new file mode 100644 index 00000000..2e6e1f16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabs8.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_ONE_LOOP(8, { + pd = ps1; + if (ps1 == INT8_MIN) { + pd = INT8_MAX; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = - ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kabsw.h b/vendor/riscv-isa-sim/riscv/insns/kabsw.h new file mode 100644 index 00000000..5e83b759 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kabsw.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_extension(EXT_ZPN); +int32_t rs1 = P_W(RS1, 0); + +if (rs1 == INT32_MIN) { + rs1 = INT32_MAX; + P_SET_OV(1); +} + +WRITE_RD(sext_xlen(rs1 >= 0 ? 
rs1 : -rs1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd16.h b/vendor/riscv-isa-sim/riscv/insns/kadd16.h new file mode 100644 index 00000000..b6defe1f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_LOOP(16, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd32.h b/vendor/riscv-isa-sim/riscv/insns/kadd32.h new file mode 100644 index 00000000..1728847a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd64.h b/vendor/riscv-isa-sim/riscv/insns/kadd64.h new file mode 100644 index 00000000..c58fff09 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_PROFILE({ + bool sat = false; + rd = (sat_add(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kadd8.h b/vendor/riscv-isa-sim/riscv/insns/kadd8.h new file mode 100644 index 00000000..98864c70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kadd8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_LOOP(8, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kaddh.h b/vendor/riscv-isa-sim/riscv/insns/kaddh.h new file mode 100644 index 00000000..43aedb2d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kaddh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SH(RS1, 0) + (sreg_t)P_SH(RS2, 0); +P_SAT(res, 16); +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kaddw.h b/vendor/riscv-isa-sim/riscv/insns/kaddw.h new file mode 100644 index 00000000..3298d57e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kaddw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SW(RS1, 0) + (sreg_t)P_SW(RS2, 0); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kcras16.h b/vendor/riscv-isa-sim/riscv/insns/kcras16.h new file mode 100644 index 00000000..d7464253 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kcras16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kcras32.h b/vendor/riscv-isa-sim/riscv/insns/kcras32.h new file mode 100644 index 00000000..ead31f8a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kcras32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/kcrsa16.h new file mode 100644 index 00000000..2a7ca4d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kcrsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/kcrsa32.h new file mode 100644 index 00000000..b688fd3c --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/kcrsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabb.h b/vendor/riscv-isa-sim/riscv/insns/kdmabb.h new file mode 100644 index 00000000..7ca05639 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabb.h @@ -0,0 +1,17 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 0); + +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} + +res += sext32(RD); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabb16.h b/vendor/riscv-isa-sim/riscv/insns/kdmabb16.h new file mode 100644 index 00000000..2ccd40b1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabb16.h @@ -0,0 +1,18 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 0); + int32_t mres; + bool sat; + + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + mres = aop * bop; + mres <<= 1; + } else { + mres = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabt.h b/vendor/riscv-isa-sim/riscv/insns/kdmabt.h new file mode 100644 index 00000000..d50a6dfa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabt.h @@ -0,0 +1,17 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 1); + +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} + +res += sext32(RD); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmabt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmabt16.h new file mode 100644 index 00000000..49538b38 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmabt16.h @@ -0,0 +1,18 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 1); + int32_t mres; + bool sat; + + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + mres = aop * bop; + mres <<= 1; + } else { + mres = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmatt.h b/vendor/riscv-isa-sim/riscv/insns/kdmatt.h new file mode 100644 index 00000000..e917d414 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmatt.h @@ -0,0 +1,17 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 1); +sreg_t bop = P_SH(RS2, 1); + +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} + +res += sext32(RD); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmatt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmatt16.h new file mode 100644 index 00000000..ebce13f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmatt16.h @@ -0,0 +1,18 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 1); + int32_t bop = P_SH(ps2, 1); + int32_t mres; + bool sat; + + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + mres = aop * bop; + mres <<= 1; + } else { + mres = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git 
a/vendor/riscv-isa-sim/riscv/insns/kdmbb.h b/vendor/riscv-isa-sim/riscv/insns/kdmbb.h new file mode 100644 index 00000000..2f7a3f95 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbb.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 0); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmbb16.h b/vendor/riscv-isa-sim/riscv/insns/kdmbb16.h new file mode 100644 index 00000000..a84877d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbb16.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 0); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd <<= 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmbt.h b/vendor/riscv-isa-sim/riscv/insns/kdmbt.h new file mode 100644 index 00000000..7f093e3a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmbt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmbt16.h new file mode 100644 index 00000000..85e9d0e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmbt16.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd <<= 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmtt.h b/vendor/riscv-isa-sim/riscv/insns/kdmtt.h new file mode 100644 index 00000000..05a4c8c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmtt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 1); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res <<= 1; +} else { + res = INT32_MAX; + P_SET_OV(1); +} +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kdmtt16.h b/vendor/riscv-isa-sim/riscv/insns/kdmtt16.h new file mode 100644 index 00000000..2190710a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kdmtt16.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 1); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd <<= 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khm16.h b/vendor/riscv-isa-sim/riscv/insns/khm16.h new file mode 100644 index 00000000..9c2e28c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khm16.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_LOOP(16, { + if ((ps1 != INT16_MIN) | (ps2 != INT16_MIN)) { + pd = (ps1 * ps2) >> 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khm8.h b/vendor/riscv-isa-sim/riscv/insns/khm8.h new file mode 100644 index 00000000..ac21d68c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khm8.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_LOOP(8, { + if ((ps1 != INT8_MIN) | (ps2 != INT8_MIN)) { + pd = (ps1 * 
ps2) >> 7; + } else { + pd = INT8_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbb.h b/vendor/riscv-isa-sim/riscv/insns/khmbb.h new file mode 100644 index 00000000..e08eddca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbb.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 0); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res >>= 15; +} else { + res = INT16_MAX; + P_SET_OV(1); +} +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbb16.h b/vendor/riscv-isa-sim/riscv/insns/khmbb16.h new file mode 100644 index 00000000..efbd7eb6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbb16.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 0); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd >>= 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } + pd = (int16_t)pd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbt.h b/vendor/riscv-isa-sim/riscv/insns/khmbt.h new file mode 100644 index 00000000..0c19cd16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 0); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res >>= 15; +} else { + res = INT16_MAX; + P_SET_OV(1); +} +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/khmbt16.h b/vendor/riscv-isa-sim/riscv/insns/khmbt16.h new file mode 100644 index 00000000..4bb1f48c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmbt16.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 0); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd >>= 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } + pd = (int16_t)pd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmtt.h b/vendor/riscv-isa-sim/riscv/insns/khmtt.h new file mode 100644 index 00000000..dcd45030 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmtt.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res; +sreg_t aop = P_SH(RS1, 1); +sreg_t bop = P_SH(RS2, 1); +if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + res = aop * bop; + res >>= 15; +} else { + res = INT16_MAX; + P_SET_OV(1); +} +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/khmtt16.h b/vendor/riscv-isa-sim/riscv/insns/khmtt16.h new file mode 100644 index 00000000..d3c0b4cf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmtt16.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + int32_t aop = P_SH(ps1, 1); + int32_t bop = P_SH(ps2, 1); + if ((INT16_MIN != aop) | (INT16_MIN != bop)) { + pd = aop * bop; + pd >>= 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } + pd = (int16_t)pd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmx16.h b/vendor/riscv-isa-sim/riscv/insns/khmx16.h new file mode 100644 index 00000000..bf934627 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmx16.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_CROSS_LOOP(16, { + if ((ps1 != INT16_MIN) | (ps2 != INT16_MIN)) { + pd = (ps1 * ps2) >> 15; + } else { + pd = INT16_MAX; + P_SET_OV(1); + } +},) diff --git a/vendor/riscv-isa-sim/riscv/insns/khmx8.h b/vendor/riscv-isa-sim/riscv/insns/khmx8.h new file mode 100644 index 
00000000..0d6a5d5a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/khmx8.h @@ -0,0 +1,9 @@ +require_vector_vs; +P_CROSS_LOOP(8, { + if ((ps1 != INT8_MIN) | (ps2 != INT8_MIN)) { + pd = (ps1 * ps2) >> 7; + } else { + pd = INT8_MAX; + P_SET_OV(1); + } +},) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabb.h b/vendor/riscv-isa-sim/riscv/insns/kmabb.h new file mode 100644 index 00000000..f2d7715e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabb.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int32_t mres = P_SH(ps1, 0) * P_SH(ps2, 0); + bool sat = false; + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabb32.h b/vendor/riscv-isa-sim/riscv/insns/kmabb32.h new file mode 100644 index 00000000..752bf8b5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabb32.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat = false; +sreg_t mres = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +WRITE_RD((sat_add(RD, mres, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabt.h b/vendor/riscv-isa-sim/riscv/insns/kmabt.h new file mode 100644 index 00000000..4ead23bc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabt.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int32_t mres = P_SH(ps1, 0) * P_SH(ps2, 1); + bool sat = false; + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmabt32.h b/vendor/riscv-isa-sim/riscv/insns/kmabt32.h new file mode 100644 index 00000000..ee7511bd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmabt32.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat = false; +sreg_t mres = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +WRITE_RD((sat_add(RD, mres, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmada.h b/vendor/riscv-isa-sim/riscv/insns/kmada.h new file mode 100644 index 00000000..3c082c7b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmada.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmadrs.h b/vendor/riscv-isa-sim/riscv/insns/kmadrs.h new file mode 100644 index 00000000..a4503517 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmadrs.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + if (j & 1) + pd_res -= ps1 * ps2; + else + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmadrs32.h b/vendor/riscv-isa-sim/riscv/insns/kmadrs32.h new file mode 100644 index 00000000..0f71e90f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmadrs32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(RD, mres0, -mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmads.h b/vendor/riscv-isa-sim/riscv/insns/kmads.h new file mode 100644 index 00000000..89aabe05 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmads.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmads32.h b/vendor/riscv-isa-sim/riscv/insns/kmads32.h new file mode 100644 index 00000000..0a3b5905 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmads32.h @@ -0,0 +1,10 @@ 
+require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(RD, -mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmar64.h b/vendor/riscv-isa-sim/riscv/insns/kmar64.h new file mode 100644 index 00000000..49f44823 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmar64.h @@ -0,0 +1,16 @@ +require_vector_vs; +P_64_PROFILE_BASE() +P_64_PROFILE_PARAM(true, false) + +bool sat = false; +sreg_t mres0 = (sreg_t)P_SW(rs1, 0) * P_SW(rs2, 0); +sreg_t mres1 = (sreg_t)P_SW(rs1, 1) * P_SW(rs2, 1); +sreg_t res; + +if (xlen == 32) { + rd = (sat_add(rd, mres0, sat)); +} else { + rd = (sat_add(rd, mres0, mres1, sat)); +} +P_SET_OV(sat); +P_64_PROFILE_END() diff --git a/vendor/riscv-isa-sim/riscv/insns/kmatt.h b/vendor/riscv-isa-sim/riscv/insns/kmatt.h new file mode 100644 index 00000000..4be2f3d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmatt.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int32_t mres = P_SH(ps1, 1) * P_SH(ps2, 1); + bool sat = false; + pd = (sat_add(pd, mres, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmatt32.h b/vendor/riscv-isa-sim/riscv/insns/kmatt32.h new file mode 100644 index 00000000..4fe9ed2b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmatt32.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat = false; +sreg_t mres = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); +WRITE_RD((sat_add(RD, mres, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxda.h b/vendor/riscv-isa-sim/riscv/insns/kmaxda.h new file mode 100644 index 00000000..393f0472 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, true, true, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxda32.h b/vendor/riscv-isa-sim/riscv/insns/kmaxda32.h new file mode 100644 index 00000000..b9346b96 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(RD, mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxds.h b/vendor/riscv-isa-sim/riscv/insns/kmaxds.h new file mode 100644 index 00000000..c2f0e591 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxds.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, true, true, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmaxds32.h b/vendor/riscv-isa-sim/riscv/insns/kmaxds32.h new file mode 100644 index 00000000..6a7d64e4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmaxds32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(RD, -mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmda.h b/vendor/riscv-isa-sim/riscv/insns/kmda.h new file mode 100644 index 00000000..68b6c9a7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, false, true, { + pd_res += ps1 * ps2; +}) 
diff --git a/vendor/riscv-isa-sim/riscv/insns/kmda32.h b/vendor/riscv-isa-sim/riscv/insns/kmda32.h new file mode 100644 index 00000000..646021f1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmac.h b/vendor/riscv-isa-sim/riscv/insns/kmmac.h new file mode 100644 index 00000000..946f0fe5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmac.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + bool sat = false; + pd = (sat_add(pd, (mres >> 32), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmac_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmac_u.h new file mode 100644 index 00000000..5a06a4db --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmac_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + int32_t round = (((mres >> 31) + 1) >> 1); + bool sat = false; + pd = (sat_add(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb.h new file mode 100644 index 00000000..0e3a6944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 0); + bool sat = false; + pd = (sat_add(pd, (mres >> 16), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb2.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb2.h new file mode 100644 index 00000000..6b3aa0dd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb2.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + addop = mres >> 16; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h new file mode 100644 index 00000000..f44346e1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb2_u.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + addop = ((mres >> 15) + 1) >> 1; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h new file mode 100644 index 00000000..766dd716 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawb_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 0); + int32_t round = (((mres >> 15) + 1) >> 1); + bool sat = false; + pd = (sat_add(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt.h new file mode 100644 index 00000000..514ee484 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 1); + bool sat = false; + pd = 
(sat_add(pd, (mres >> 16), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt2.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt2.h new file mode 100644 index 00000000..3cd72de7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt2.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + addop = mres >> 16; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h new file mode 100644 index 00000000..7fe378c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt2_u.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_LOOP(32, { + int64_t addop = 0; + int64_t mres = 0; + bool sat = false; + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + addop = ((mres >> 15) + 1) >> 1; + } else { + addop = INT32_MAX; + P_SET_OV(1); + } + pd = (sat_add(pd, addop, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h new file mode 100644 index 00000000..74d8fd01 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmawt_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t)ps1 * P_SH(ps2, 1); + int32_t round = (((mres >> 15) + 1) >> 1); + bool sat = false; + pd = (sat_add(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmsb.h b/vendor/riscv-isa-sim/riscv/insns/kmmsb.h new file mode 100644 index 00000000..29ad1bfa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmsb.h @@ -0,0 +1,7 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + bool sat = false; + pd = (sat_sub(pd, (mres >> 32), sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h new file mode 100644 index 00000000..c7b283ea --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmsb_u.h @@ -0,0 +1,8 @@ +require_vector_vs; +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + int32_t round = (((mres >> 31) + 1) >> 1); + bool sat = false; + pd = (sat_sub(pd, round, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwb2.h b/vendor/riscv-isa-sim/riscv/insns/kmmwb2.h new file mode 100644 index 00000000..272f7380 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwb2.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + pd = mres >> 16; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h new file mode 100644 index 00000000..b5a5006c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwb2_u.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 0))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 0)) << 1; + pd = ((mres >> 15) + 1) >> 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwt2.h b/vendor/riscv-isa-sim/riscv/insns/kmmwt2.h new file mode 100644 index 00000000..73d3dc8c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwt2.h @@ -0,0 +1,10 @@ 
+require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + pd = mres >> 16; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h b/vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h new file mode 100644 index 00000000..1f525a8a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmmwt2_u.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT16_MIN != P_SH(ps2, 1))) { + int64_t mres = ((int64_t) ps1 * P_SH(ps2, 1)) << 1; + pd = ((mres >> 15) + 1) >> 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsda.h b/vendor/riscv-isa-sim/riscv/insns/kmsda.h new file mode 100644 index 00000000..94b118a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_LOOP(32, 16, true, true, { + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsda32.h b/vendor/riscv-isa-sim/riscv/insns/kmsda32.h new file mode 100644 index 00000000..d54d42c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD((sat_add(RD, -mres0, -mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsr64.h b/vendor/riscv-isa-sim/riscv/insns/kmsr64.h new file mode 100644 index 00000000..bfef5033 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsr64.h @@ -0,0 +1,26 @@ +require_vector_vs; +P_64_PROFILE_BASE() +P_64_PROFILE_PARAM(true, false) + +bool sat = false; +sreg_t mres0 = -(sreg_t)P_SW(rs1, 0) * P_SW(rs2, 0); +sreg_t mres1 = -(sreg_t)P_SW(rs1, 1) * P_SW(rs2, 1); +sreg_t res; + +if (xlen == 32) { + rd = (sat_add(rd, mres0, sat)); +} else { + if ((rd ^ mres0) < 0) { + res = rd + mres0; + rd = (sat_add(res, mres1, sat)); + } else if ((rd ^ mres1) < 0) { + res = rd + mres1; + rd = (sat_add(res, mres0, sat)); + } else { + rd = (sat_add(rd, mres0, sat)); + P_SET_OV(sat); + rd = (sat_add(rd, mres1, sat)); + } +} +P_SET_OV(sat); +P_64_PROFILE_END() diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsxda.h b/vendor/riscv-isa-sim/riscv/insns/kmsxda.h new file mode 100644 index 00000000..2d0faa36 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsxda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, true, true, { + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmsxda32.h b/vendor/riscv-isa-sim/riscv/insns/kmsxda32.h new file mode 100644 index 00000000..3006b542 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmsxda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(RD, -mres0, -mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/kmxda.h b/vendor/riscv-isa-sim/riscv/insns/kmxda.h new file mode 100644 index 00000000..4addd8a5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kmxda.h @@ -0,0 +1,4 @@ +require_vector_vs; +P_REDUCTION_CROSS_LOOP(32, 16, false, true, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kmxda32.h b/vendor/riscv-isa-sim/riscv/insns/kmxda32.h new file mode 100644 index 00000000..99a8204e --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/kmxda32.h @@ -0,0 +1,10 @@ +require_vector_vs; +require_rv64; +require_extension(EXT_ZPN); + +bool sat; +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD((sat_add(mres0, mres1, sat))); +P_SET_OV(sat); diff --git a/vendor/riscv-isa-sim/riscv/insns/ksll16.h b/vendor/riscv-isa-sim/riscv/insns/ksll16.h new file mode 100644 index 00000000..9e03b347 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksll16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_X_LOOP(16, 4, { + auto res = (sreg_t)ps1 << sa; + P_SAT(res, 16); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksll32.h b/vendor/riscv-isa-sim/riscv/insns/ksll32.h new file mode 100644 index 00000000..35888986 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksll32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_X_LOOP(32, 5, { + auto res = (sreg_t)ps1 << sa; + P_SAT(res, 32); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksll8.h b/vendor/riscv-isa-sim/riscv/insns/ksll8.h new file mode 100644 index 00000000..930ea03b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksll8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_X_LOOP(8, 3, { + auto res = (sreg_t)ps1 << sa; + P_SAT(res, 8); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslli16.h b/vendor/riscv-isa-sim/riscv/insns/kslli16.h new file mode 100644 index 00000000..edc7c671 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslli16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_I_LOOP(16, 4, { + auto res = (sreg_t)ps1 << imm4u; + P_SAT(res, 16); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslli32.h b/vendor/riscv-isa-sim/riscv/insns/kslli32.h new file mode 100644 index 00000000..4fd506b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslli32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_I_LOOP(32, 5, { + auto res = (sreg_t)ps1 << imm5u; + P_SAT(res, 32); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslli8.h b/vendor/riscv-isa-sim/riscv/insns/kslli8.h new file mode 100644 index 00000000..18d714f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslli8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_I_LOOP(8, 3, { + auto res = (sreg_t)ps1 << imm3u; + P_SAT(res, 8); + pd = res; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslliw.h b/vendor/riscv-isa-sim/riscv/insns/kslliw.h new file mode 100644 index 00000000..8902d3a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslliw.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t rs1 = sext32(RS1); +sreg_t sa = insn.p_imm5(); +sreg_t res = rs1 << sa; + +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ksllw.h b/vendor/riscv-isa-sim/riscv/insns/ksllw.h new file mode 100644 index 00000000..7e8452f8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksllw.h @@ -0,0 +1,8 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t rs1 = sext32(RS1); +sreg_t sa = get_field(RS2, make_mask64(0, 5)); +sreg_t res = rs1 << sa; + +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra16.h b/vendor/riscv-isa-sim/riscv/insns/kslra16.h new file mode 100644 index 00000000..ad1443a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra16.h @@ -0,0 +1,12 @@ +require_vector_vs; +P_X_LOOP(16, 5, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 16) ? 
15 : sa; + pd = ps1 >> sa; + } else { + auto res = (sreg_t)ps1 << ssa; + P_SAT(res, 16); + pd = res; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra16_u.h b/vendor/riscv-isa-sim/riscv/insns/kslra16_u.h new file mode 100644 index 00000000..8335f3e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra16_u.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_X_LOOP(16, 5, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 16) ? 15 : sa; + if(sa != 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; + } else { + auto res = (sreg_t)ps1 << ssa; + P_SAT(res, 16); + pd = res; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra32.h b/vendor/riscv-isa-sim/riscv/insns/kslra32.h new file mode 100644 index 00000000..871d6011 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra32.h @@ -0,0 +1,13 @@ +require_vector_vs; +require_rv64; +P_X_LOOP(32, 6, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 32) ? 31 : sa; + pd = ps1 >> sa; + } else { + auto res = (sreg_t)ps1 << ssa; + P_SAT(res, 32); + pd = res; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra32_u.h b/vendor/riscv-isa-sim/riscv/insns/kslra32_u.h new file mode 100644 index 00000000..d53c8fe1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra32_u.h @@ -0,0 +1,16 @@ +require_vector_vs; +require_rv64; +P_X_LOOP(32, 6, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 32) ? 31 : sa; + if(sa != 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; + } else { + auto res = (sreg_t)ps1 << ssa; + P_SAT(res, 32); + pd = res; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra8.h b/vendor/riscv-isa-sim/riscv/insns/kslra8.h new file mode 100644 index 00000000..b3f3e6b8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra8.h @@ -0,0 +1,12 @@ +require_vector_vs; +P_X_LOOP(8, 4, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 8) ? 7 : sa; + pd = ps1 >> sa; + } else { + auto res = (sreg_t)ps1 << ssa; + P_SAT(res, 8); + pd = res; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslra8_u.h b/vendor/riscv-isa-sim/riscv/insns/kslra8_u.h new file mode 100644 index 00000000..620f3bd3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslra8_u.h @@ -0,0 +1,15 @@ +require_vector_vs; +P_X_LOOP(8, 4, { + if (ssa < 0) { + sa = -ssa; + sa = (sa == 8) ? 7 : sa; + if(sa != 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; + } else { + auto res = (sreg_t)ps1 << ssa; + P_SAT(res, 8); + pd = res; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kslraw.h b/vendor/riscv-isa-sim/riscv/insns/kslraw.h new file mode 100644 index 00000000..fa4c8443 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslraw.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t rs1 = sext32(RS1); +sreg_t sa = int64_t(RS2) << (64 - 6) >> (64 - 6); + +if (sa < 0) { + sa = -sa; + sa = (sa == 32) ? 31 : sa; + WRITE_RD(sext32(rs1 >> sa)); +} else { + auto res = rs1 << sa; + P_SAT(res, 32); + WRITE_RD(sext32(res)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/kslraw_u.h b/vendor/riscv-isa-sim/riscv/insns/kslraw_u.h new file mode 100644 index 00000000..ebecb615 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kslraw_u.h @@ -0,0 +1,14 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t rs1 = sext32(RS1); +sreg_t sa = int64_t(RS2) << (64 - 6) >> (64 - 6); + +if (sa < 0) { + sa = -sa; + sa = (sa == 32) ? 
31 : sa; + WRITE_RD(sext32(((rs1 >> (sa - 1)) + 1)) >> 1); +} else { + auto res = rs1 << sa; + P_SAT(res, 32); + WRITE_RD(sext32(res)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/kstas16.h b/vendor/riscv-isa-sim/riscv/insns/kstas16.h new file mode 100644 index 00000000..ad180131 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kstas16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_STRAIGHT_ULOOP(16, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kstas32.h b/vendor/riscv-isa-sim/riscv/insns/kstas32.h new file mode 100644 index 00000000..35f23e03 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kstas32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_STRAIGHT_ULOOP(32, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kstsa16.h b/vendor/riscv-isa-sim/riscv/insns/kstsa16.h new file mode 100644 index 00000000..47a8918b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kstsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_STRAIGHT_ULOOP(16, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kstsa32.h b/vendor/riscv-isa-sim/riscv/insns/kstsa32.h new file mode 100644 index 00000000..aa9c372f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kstsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_STRAIGHT_ULOOP(32, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_add(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub16.h b/vendor/riscv-isa-sim/riscv/insns/ksub16.h new file mode 100644 index 00000000..57562b5d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksub16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_LOOP(16, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub32.h b/vendor/riscv-isa-sim/riscv/insns/ksub32.h new file mode 100644 index 00000000..3ef32e87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksub32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_LOOP(32, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub64.h b/vendor/riscv-isa-sim/riscv/insns/ksub64.h new file mode 100644 index 00000000..c6f09948 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksub64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_PROFILE({ + bool sat = false; + rd = (sat_sub(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksub8.h b/vendor/riscv-isa-sim/riscv/insns/ksub8.h new file mode 100644 index 00000000..705f6329 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksub8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_LOOP(8, { + bool sat = false; + pd = (sat_sub(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ksubh.h b/vendor/riscv-isa-sim/riscv/insns/ksubh.h new file mode 100644 index 00000000..2455c161 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksubh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SH(RS1, 0) - (sreg_t)P_SH(RS2, 0); +P_SAT(res, 16); 
+WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ksubw.h b/vendor/riscv-isa-sim/riscv/insns/ksubw.h new file mode 100644 index 00000000..3a3d7806 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ksubw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SW(RS1, 0) - (sreg_t)P_SW(RS2, 0); +P_SAT(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/kwmmul.h b/vendor/riscv-isa-sim/riscv/insns/kwmmul.h new file mode 100644 index 00000000..b0ab8d4d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kwmmul.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT32_MIN != ps2)) { + int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1; + pd = mres >> 32; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h b/vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h new file mode 100644 index 00000000..c2045e19 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/kwmmul_u.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_LOOP(32, { + if((INT32_MIN != ps1) | (INT32_MIN != ps2)) { + int64_t mres = ((int64_t) ps1 * (int64_t) ps2) << 1; + pd = ((mres >> 31) + 1) >> 1; + } else { + pd = INT32_MAX; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/lb.h b/vendor/riscv-isa-sim/riscv/insns/lb.h new file mode 100644 index 00000000..0f0999ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lb.h @@ -0,0 +1 @@ +WRITE_RD(MMU.load_int8(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/lbu.h b/vendor/riscv-isa-sim/riscv/insns/lbu.h new file mode 100644 index 00000000..64d4a688 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lbu.h @@ -0,0 +1 @@ +WRITE_RD(MMU.load_uint8(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/ld.h b/vendor/riscv-isa-sim/riscv/insns/ld.h new file mode 100644 index 00000000..1122b980 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ld.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(MMU.load_int64(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/lh.h b/vendor/riscv-isa-sim/riscv/insns/lh.h new file mode 100644 index 00000000..0d458e0e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lh.h @@ -0,0 +1 @@ +WRITE_RD(MMU.load_int16(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/lhu.h b/vendor/riscv-isa-sim/riscv/insns/lhu.h new file mode 100644 index 00000000..9d240702 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lhu.h @@ -0,0 +1 @@ +WRITE_RD(MMU.load_uint16(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/lr_d.h b/vendor/riscv-isa-sim/riscv/insns/lr_d.h new file mode 100644 index 00000000..6dd8d672 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lr_d.h @@ -0,0 +1,5 @@ +require_extension('A'); +require_rv64; +auto res = MMU.load_int64(RS1, true); +MMU.acquire_load_reservation(RS1); +WRITE_RD(res); diff --git a/vendor/riscv-isa-sim/riscv/insns/lr_w.h b/vendor/riscv-isa-sim/riscv/insns/lr_w.h new file mode 100644 index 00000000..185be53b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lr_w.h @@ -0,0 +1,4 @@ +require_extension('A'); +auto res = MMU.load_int32(RS1, true); +MMU.acquire_load_reservation(RS1); +WRITE_RD(res); diff --git a/vendor/riscv-isa-sim/riscv/insns/lui.h b/vendor/riscv-isa-sim/riscv/insns/lui.h new file mode 100644 index 00000000..c7b5264e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lui.h @@ -0,0 +1 @@ +WRITE_RD(insn.u_imm()); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/lw.h b/vendor/riscv-isa-sim/riscv/insns/lw.h new file mode 100644 index 00000000..4e8ed040 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lw.h @@ -0,0 +1 @@ +WRITE_RD(MMU.load_int32(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/lwu.h b/vendor/riscv-isa-sim/riscv/insns/lwu.h new file mode 100644 index 00000000..dcc4d75b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/lwu.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(MMU.load_uint32(RS1 + insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/maddr32.h b/vendor/riscv-isa-sim/riscv/insns/maddr32.h new file mode 100644 index 00000000..943aeac9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/maddr32.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZPN); +reg_t mres = (reg_t)P_W(RS1, 0) * P_W(RS2, 0); +reg_t rd = P_W(RD, 0); +rd += mres; +WRITE_RD(sext_xlen((int32_t)rd)); diff --git a/vendor/riscv-isa-sim/riscv/insns/max.h b/vendor/riscv-isa-sim/riscv/insns/max.h new file mode 100644 index 00000000..073b8df2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/max.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBPBO, EXT_ZBB); +WRITE_RD(sext_xlen(sreg_t(RS1) > sreg_t(RS2) ? RS1 : RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/maxu.h b/vendor/riscv-isa-sim/riscv/insns/maxu.h new file mode 100644 index 00000000..05af4925 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/maxu.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBB); +WRITE_RD(sext_xlen(RS1 > RS2 ? RS1 : RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/min.h b/vendor/riscv-isa-sim/riscv/insns/min.h new file mode 100644 index 00000000..47bc993c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/min.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBPBO, EXT_ZBB); +WRITE_RD(sext_xlen(sreg_t(RS1) < sreg_t(RS2) ? RS1 : RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/minu.h b/vendor/riscv-isa-sim/riscv/insns/minu.h new file mode 100644 index 00000000..7582c0d1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/minu.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBB); +WRITE_RD(sext_xlen(RS1 < RS2 ? RS1 : RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/mret.h b/vendor/riscv-isa-sim/riscv/insns/mret.h new file mode 100644 index 00000000..5198b8fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mret.h @@ -0,0 +1,14 @@ +require_privilege(PRV_M); +set_pc_and_serialize(p->get_state()->mepc->read()); +reg_t s = STATE.mstatus->read(); +reg_t prev_prv = get_field(s, MSTATUS_MPP); +reg_t prev_virt = get_field(s, MSTATUS_MPV); +if (prev_prv != PRV_M) + s = set_field(s, MSTATUS_MPRV, 0); +s = set_field(s, MSTATUS_MIE, get_field(s, MSTATUS_MPIE)); +s = set_field(s, MSTATUS_MPIE, 1); +s = set_field(s, MSTATUS_MPP, p->extension_enabled('U') ? 
PRV_U : PRV_M); +s = set_field(s, MSTATUS_MPV, 0); +p->put_csr(CSR_MSTATUS, s); +p->set_privilege(prev_prv); +p->set_virt(prev_virt); diff --git a/vendor/riscv-isa-sim/riscv/insns/msubr32.h b/vendor/riscv-isa-sim/riscv/insns/msubr32.h new file mode 100644 index 00000000..2086bd19 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/msubr32.h @@ -0,0 +1,5 @@ +require_extension(EXT_ZPN); +reg_t mres = (reg_t)P_W(RS1, 0) * P_W(RS2, 0); +reg_t rd = P_W(RD, 0); +rd -= mres; +WRITE_RD(sext_xlen((int32_t)rd)); diff --git a/vendor/riscv-isa-sim/riscv/insns/mul.h b/vendor/riscv-isa-sim/riscv/insns/mul.h new file mode 100644 index 00000000..52d00225 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mul.h @@ -0,0 +1,2 @@ +require_either_extension('M', EXT_ZMMUL); +WRITE_RD(sext_xlen(RS1 * RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/mulh.h b/vendor/riscv-isa-sim/riscv/insns/mulh.h new file mode 100644 index 00000000..a8f67d12 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mulh.h @@ -0,0 +1,5 @@ +require_either_extension('M', EXT_ZMMUL); +if (xlen == 64) + WRITE_RD(mulh(RS1, RS2)); +else + WRITE_RD(sext32((sext32(RS1) * sext32(RS2)) >> 32)); diff --git a/vendor/riscv-isa-sim/riscv/insns/mulhsu.h b/vendor/riscv-isa-sim/riscv/insns/mulhsu.h new file mode 100644 index 00000000..cb5caa4e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mulhsu.h @@ -0,0 +1,5 @@ +require_either_extension('M', EXT_ZMMUL); +if (xlen == 64) + WRITE_RD(mulhsu(RS1, RS2)); +else + WRITE_RD(sext32((sext32(RS1) * reg_t((uint32_t)RS2)) >> 32)); diff --git a/vendor/riscv-isa-sim/riscv/insns/mulhu.h b/vendor/riscv-isa-sim/riscv/insns/mulhu.h new file mode 100644 index 00000000..9ce751e3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mulhu.h @@ -0,0 +1,5 @@ +require_either_extension('M', EXT_ZMMUL); +if (xlen == 64) + WRITE_RD(mulhu(RS1, RS2)); +else + WRITE_RD(sext32(((uint64_t)(uint32_t)RS1 * (uint64_t)(uint32_t)RS2) >> 32)); diff --git a/vendor/riscv-isa-sim/riscv/insns/mulr64.h b/vendor/riscv-isa-sim/riscv/insns/mulr64.h new file mode 100644 index 00000000..4e2aad75 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mulr64.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZPSFOPERAND); +reg_t rd = (reg_t)P_W(RS1, 0) * P_W(RS2, 0); +P_64_PROFILE_END(); diff --git a/vendor/riscv-isa-sim/riscv/insns/mulsr64.h b/vendor/riscv-isa-sim/riscv/insns/mulsr64.h new file mode 100644 index 00000000..a2a51156 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mulsr64.h @@ -0,0 +1,3 @@ +require_extension(EXT_ZPSFOPERAND); +sreg_t rd = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +P_64_PROFILE_END(); diff --git a/vendor/riscv-isa-sim/riscv/insns/mulw.h b/vendor/riscv-isa-sim/riscv/insns/mulw.h new file mode 100644 index 00000000..20108d84 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/mulw.h @@ -0,0 +1,3 @@ +require_either_extension('M', EXT_ZMMUL); +require_rv64; +WRITE_RD(sext32(RS1 * RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/or.h b/vendor/riscv-isa-sim/riscv/insns/or.h new file mode 100644 index 00000000..3f2fffc2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/or.h @@ -0,0 +1 @@ +WRITE_RD(RS1 | RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/ori.h b/vendor/riscv-isa-sim/riscv/insns/ori.h new file mode 100644 index 00000000..3aba1cb2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ori.h @@ -0,0 +1,2 @@ +// prefetch.i/r/w hint when rd = 0 and i_imm[4:0] = 0/1/3 +WRITE_RD(insn.i_imm() | RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/orn.h b/vendor/riscv-isa-sim/riscv/insns/orn.h new file mode 
100644 index 00000000..c1c9fd4d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/orn.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +WRITE_RD(RS1 | ~RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/pack.h b/vendor/riscv-isa-sim/riscv/insns/pack.h new file mode 100644 index 00000000..2140f918 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pack.h @@ -0,0 +1,11 @@ +// RV32Zbb contains zext.h but not general pack +require(((xlen == 32) && (insn.rs2() == 0) && p->extension_enabled(EXT_ZBB)) + || p->extension_enabled(EXT_ZPN) + || p->extension_enabled(EXT_ZBKB) + || p->extension_enabled(EXT_XZBP) + || p->extension_enabled(EXT_XZBE) + || p->extension_enabled(EXT_XZBF) + || ((xlen == 64) && p->extension_enabled(EXT_XZBM))); +reg_t lo = zext_xlen(RS1 << (xlen/2)) >> (xlen/2); +reg_t hi = zext_xlen(RS2 << (xlen/2)); +WRITE_RD(sext_xlen(lo | hi)); diff --git a/vendor/riscv-isa-sim/riscv/insns/packh.h b/vendor/riscv-isa-sim/riscv/insns/packh.h new file mode 100644 index 00000000..82886e32 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/packh.h @@ -0,0 +1,7 @@ +require(p->extension_enabled(EXT_ZBKB) || + p->extension_enabled(EXT_XZBP) || + p->extension_enabled(EXT_XZBE) || + p->extension_enabled(EXT_XZBF)); +reg_t lo = zext_xlen(RS1 << (xlen-8)) >> (xlen-8); +reg_t hi = zext_xlen(RS2 << (xlen-8)) >> (xlen-16); +WRITE_RD(sext_xlen(lo | hi)); diff --git a/vendor/riscv-isa-sim/riscv/insns/packu.h b/vendor/riscv-isa-sim/riscv/insns/packu.h new file mode 100644 index 00000000..441207c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/packu.h @@ -0,0 +1,6 @@ +require(p->extension_enabled(EXT_ZPN) || + p->extension_enabled(EXT_XZBP) || + ((xlen == 64) && p->extension_enabled(EXT_XZBM))); +reg_t lo = zext_xlen(RS1) >> (xlen/2); +reg_t hi = zext_xlen(RS2) >> (xlen/2) << (xlen/2); +WRITE_RD(sext_xlen(lo | hi)); diff --git a/vendor/riscv-isa-sim/riscv/insns/packuw.h b/vendor/riscv-isa-sim/riscv/insns/packuw.h new file mode 100644 index 00000000..1b3f7d5f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/packuw.h @@ -0,0 +1,5 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t lo = zext32(RS1) >> 16; +reg_t hi = zext32(RS2) >> 16 << 16; +WRITE_RD(sext32(lo | hi)); diff --git a/vendor/riscv-isa-sim/riscv/insns/packw.h b/vendor/riscv-isa-sim/riscv/insns/packw.h new file mode 100644 index 00000000..084c190d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/packw.h @@ -0,0 +1,10 @@ +// RV64Zbb contains zext.h but not general packw +require(((insn.rs2() == 0) && p->extension_enabled(EXT_ZBB)) + || p->extension_enabled(EXT_ZBKB) + || p->extension_enabled(EXT_XZBP) + || p->extension_enabled(EXT_XZBE) + || p->extension_enabled(EXT_XZBF)); +require_rv64; +reg_t lo = zext32(RS1 << 16) >> 16; +reg_t hi = zext32(RS2 << 16); +WRITE_RD(sext32(lo | hi)); diff --git a/vendor/riscv-isa-sim/riscv/insns/pbsad.h b/vendor/riscv-isa-sim/riscv/insns/pbsad.h new file mode 100644 index 00000000..32789ef8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pbsad.h @@ -0,0 +1,3 @@ +P_REDUCTION_ULOOP(64, 8, false, false, { + pd_res += (ps1 > ps2 ? ps1 - ps2 : ps2 - ps1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/pbsada.h b/vendor/riscv-isa-sim/riscv/insns/pbsada.h new file mode 100644 index 00000000..cab988ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pbsada.h @@ -0,0 +1,3 @@ +P_REDUCTION_ULOOP(64, 8, true, false, { + pd_res += (ps1 > ps2 ? 
ps1 - ps2 : ps2 - ps1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/pkbb16.h b/vendor/riscv-isa-sim/riscv/insns/pkbb16.h new file mode 100644 index 00000000..20dcde61 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pkbb16.h @@ -0,0 +1,2 @@ +require_rv64; +P_PK(16, 0, 0); diff --git a/vendor/riscv-isa-sim/riscv/insns/pkbt16.h b/vendor/riscv-isa-sim/riscv/insns/pkbt16.h new file mode 100644 index 00000000..8c51ab7d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pkbt16.h @@ -0,0 +1 @@ +P_PK(16, 0, 1); diff --git a/vendor/riscv-isa-sim/riscv/insns/pkbt32.h b/vendor/riscv-isa-sim/riscv/insns/pkbt32.h new file mode 100644 index 00000000..2783d980 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pkbt32.h @@ -0,0 +1,2 @@ +require_rv64; +P_PK(32, 0, 1); diff --git a/vendor/riscv-isa-sim/riscv/insns/pktb16.h b/vendor/riscv-isa-sim/riscv/insns/pktb16.h new file mode 100644 index 00000000..c49c1ed3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pktb16.h @@ -0,0 +1 @@ +P_PK(16, 1, 0); diff --git a/vendor/riscv-isa-sim/riscv/insns/pktb32.h b/vendor/riscv-isa-sim/riscv/insns/pktb32.h new file mode 100644 index 00000000..0a7e17f4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pktb32.h @@ -0,0 +1,2 @@ +require_rv64; +P_PK(32, 1, 0); diff --git a/vendor/riscv-isa-sim/riscv/insns/pktt16.h b/vendor/riscv-isa-sim/riscv/insns/pktt16.h new file mode 100644 index 00000000..b263ed40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/pktt16.h @@ -0,0 +1,2 @@ +require_rv64; +P_PK(16, 1, 1); diff --git a/vendor/riscv-isa-sim/riscv/insns/radd16.h b/vendor/riscv-isa-sim/riscv/insns/radd16.h new file mode 100644 index 00000000..8f800502 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/radd16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/radd32.h b/vendor/riscv-isa-sim/riscv/insns/radd32.h new file mode 100644 index 00000000..df50dd17 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/radd32.h @@ -0,0 +1,4 @@ +require_rv64; +P_LOOP(32, { + pd = ((int64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/radd64.h b/vendor/riscv-isa-sim/riscv/insns/radd64.h new file mode 100644 index 00000000..110c472c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/radd64.h @@ -0,0 +1,8 @@ +P_64_PROFILE({ + rd = (rs1 + rs2) >> 1; + if (rs1 > 0 && rs2 > 0) { + rd &= ~((reg_t)1 << 63); + } else if (rs1 < 0 && rs2 < 0) { + rd |= ((reg_t)1 << 63); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/radd8.h b/vendor/riscv-isa-sim/riscv/insns/radd8.h new file mode 100644 index 00000000..ad0b6ece --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/radd8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/raddw.h b/vendor/riscv-isa-sim/riscv/insns/raddw.h new file mode 100644 index 00000000..ec04bb6d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/raddw.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SW(RS1, 0) + (sreg_t)P_SW(RS2, 0); +res >>= 1; +WRITE_RD(sext_xlen(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/rcras16.h b/vendor/riscv-isa-sim/riscv/insns/rcras16.h new file mode 100644 index 00000000..529c27fe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rcras16.h @@ -0,0 +1,5 @@ +P_CROSS_LOOP(16, { + pd = (ps1 + ps2) >> 1; +}, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rcras32.h b/vendor/riscv-isa-sim/riscv/insns/rcras32.h new file mode 100644 index 00000000..86a3f65e --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/rcras32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_LOOP(32, { + pd = ((int64_t)ps1 + ps2) >> 1; +}, { + pd = ((int64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/rcrsa16.h new file mode 100644 index 00000000..156e32c4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rcrsa16.h @@ -0,0 +1,5 @@ +P_CROSS_LOOP(16, { + pd = (ps1 - ps2) >> 1; +}, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/rcrsa32.h new file mode 100644 index 00000000..b45f31fe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rcrsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_LOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rem.h b/vendor/riscv-isa-sim/riscv/insns/rem.h new file mode 100644 index 00000000..85879957 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rem.h @@ -0,0 +1,9 @@ +require_extension('M'); +sreg_t lhs = sext_xlen(RS1); +sreg_t rhs = sext_xlen(RS2); +if(rhs == 0) + WRITE_RD(lhs); +else if(lhs == INT64_MIN && rhs == -1) + WRITE_RD(0); +else + WRITE_RD(sext_xlen(lhs % rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/remu.h b/vendor/riscv-isa-sim/riscv/insns/remu.h new file mode 100644 index 00000000..e74774cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/remu.h @@ -0,0 +1,7 @@ +require_extension('M'); +reg_t lhs = zext_xlen(RS1); +reg_t rhs = zext_xlen(RS2); +if(rhs == 0) + WRITE_RD(sext_xlen(RS1)); +else + WRITE_RD(sext_xlen(lhs % rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/remuw.h b/vendor/riscv-isa-sim/riscv/insns/remuw.h new file mode 100644 index 00000000..b239c8f3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/remuw.h @@ -0,0 +1,8 @@ +require_extension('M'); +require_rv64; +reg_t lhs = zext32(RS1); +reg_t rhs = zext32(RS2); +if(rhs == 0) + WRITE_RD(sext32(lhs)); +else + WRITE_RD(sext32(lhs % rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/remw.h b/vendor/riscv-isa-sim/riscv/insns/remw.h new file mode 100644 index 00000000..56221ccd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/remw.h @@ -0,0 +1,8 @@ +require_extension('M'); +require_rv64; +sreg_t lhs = sext32(RS1); +sreg_t rhs = sext32(RS2); +if(rhs == 0) + WRITE_RD(lhs); +else + WRITE_RD(sext32(lhs % rhs)); diff --git a/vendor/riscv-isa-sim/riscv/insns/rol.h b/vendor/riscv-isa-sim/riscv/insns/rol.h new file mode 100644 index 00000000..07735a1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rol.h @@ -0,0 +1,4 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +int shamt = RS2 & (xlen-1); +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen((RS1 << shamt) | (zext_xlen(RS1) >> rshamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/rolw.h b/vendor/riscv-isa-sim/riscv/insns/rolw.h new file mode 100644 index 00000000..4d5eeb19 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rolw.h @@ -0,0 +1,5 @@ +require_rv64; +require_either_extension(EXT_ZBB, EXT_ZBKB); +int shamt = RS2 & 31; +int rshamt = -shamt & 31; +WRITE_RD(sext32((RS1 << shamt) | (zext32(RS1) >> rshamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/ror.h b/vendor/riscv-isa-sim/riscv/insns/ror.h new file mode 100644 index 00000000..61b5ff8f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ror.h @@ -0,0 +1,4 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +int shamt = RS2 & (xlen-1); +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen((RS1 << rshamt) | (zext_xlen(RS1) >> 
shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/rori.h b/vendor/riscv-isa-sim/riscv/insns/rori.h new file mode 100644 index 00000000..6585b60f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rori.h @@ -0,0 +1,5 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +require(SHAMT < xlen); +int shamt = SHAMT & (xlen-1); +int rshamt = -shamt & (xlen-1); +WRITE_RD(sext_xlen((RS1 << rshamt) | (zext_xlen(RS1) >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/roriw.h b/vendor/riscv-isa-sim/riscv/insns/roriw.h new file mode 100644 index 00000000..331d2264 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/roriw.h @@ -0,0 +1,6 @@ +require_rv64; +require_either_extension(EXT_ZBB, EXT_ZBKB); +require(SHAMT < 32); +int shamt = SHAMT & 31; +int rshamt = -shamt & 31; +WRITE_RD(sext32((RS1 << rshamt) | (zext32(RS1) >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/rorw.h b/vendor/riscv-isa-sim/riscv/insns/rorw.h new file mode 100644 index 00000000..65f0078c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rorw.h @@ -0,0 +1,5 @@ +require_rv64; +require_either_extension(EXT_ZBB, EXT_ZBKB); +int shamt = RS2 & 31; +int rshamt = -shamt & 31; +WRITE_RD(sext32((RS1 << rshamt) | (zext32(RS1) >> shamt))); diff --git a/vendor/riscv-isa-sim/riscv/insns/rstas16.h b/vendor/riscv-isa-sim/riscv/insns/rstas16.h new file mode 100644 index 00000000..298b5917 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rstas16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_LOOP(16, { + pd = (ps1 + ps2) >> 1; +}, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rstas32.h b/vendor/riscv-isa-sim/riscv/insns/rstas32.h new file mode 100644 index 00000000..9c8995a8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rstas32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_LOOP(32, { + pd = ((int64_t)ps1 + ps2) >> 1; +}, { + pd = ((int64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rstsa16.h b/vendor/riscv-isa-sim/riscv/insns/rstsa16.h new file mode 100644 index 00000000..443e4cef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rstsa16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_LOOP(16, { + pd = (ps1 - ps2) >> 1; +}, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rstsa32.h b/vendor/riscv-isa-sim/riscv/insns/rstsa32.h new file mode 100644 index 00000000..a89fc6ea --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rstsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_LOOP(32, { + pd = ((int64_t)ps1 - ps2) >> 1; +}, { + pd = ((int64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub16.h b/vendor/riscv-isa-sim/riscv/insns/rsub16.h new file mode 100644 index 00000000..768e0677 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rsub16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub32.h b/vendor/riscv-isa-sim/riscv/insns/rsub32.h new file mode 100644 index 00000000..22c31199 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rsub32.h @@ -0,0 +1,4 @@ +require_rv64; +P_LOOP(32, { + pd = ((int64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub64.h b/vendor/riscv-isa-sim/riscv/insns/rsub64.h new file mode 100644 index 00000000..397c973d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rsub64.h @@ -0,0 +1,8 @@ +P_64_PROFILE({ + rd = (rs1 - rs2) >> 1; + if (rs1 > 0 && rs2 < 0) { + rd &= ~((reg_t)1 << 63); + } else if(rs1 < 0 && rs2 > 0) { + rd |= ((reg_t)1 << 63); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rsub8.h 
b/vendor/riscv-isa-sim/riscv/insns/rsub8.h new file mode 100644 index 00000000..9cf9c1a8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rsub8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/rsubw.h b/vendor/riscv-isa-sim/riscv/insns/rsubw.h new file mode 100644 index 00000000..01dec51a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/rsubw.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_SW(RS1, 0) - (sreg_t)P_SW(RS2, 0); +res >>= 1; +WRITE_RD(sext_xlen(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sb.h b/vendor/riscv-isa-sim/riscv/insns/sb.h new file mode 100644 index 00000000..8729c2d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sb.h @@ -0,0 +1 @@ +MMU.store_uint8(RS1 + insn.s_imm(), RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/sc_d.h b/vendor/riscv-isa-sim/riscv/insns/sc_d.h new file mode 100644 index 00000000..54023ed4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sc_d.h @@ -0,0 +1,11 @@ +require_extension('A'); +require_rv64; + +bool have_reservation = MMU.check_load_reservation(RS1, 8); + +if (have_reservation) + MMU.store_uint64(RS1, RS2); + +MMU.yield_load_reservation(); + +WRITE_RD(!have_reservation); diff --git a/vendor/riscv-isa-sim/riscv/insns/sc_w.h b/vendor/riscv-isa-sim/riscv/insns/sc_w.h new file mode 100644 index 00000000..e430dcb2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sc_w.h @@ -0,0 +1,10 @@ +require_extension('A'); + +bool have_reservation = MMU.check_load_reservation(RS1, 4); + +if (have_reservation) + MMU.store_uint32(RS1, RS2); + +MMU.yield_load_reservation(); + +WRITE_RD(!have_reservation); diff --git a/vendor/riscv-isa-sim/riscv/insns/sclip16.h b/vendor/riscv-isa-sim/riscv/insns/sclip16.h new file mode 100644 index 00000000..d90ce19c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sclip16.h @@ -0,0 +1,14 @@ +require_vector_vs; +P_I_LOOP(16, 4, { + int64_t int_max = INT64_MAX >> (64 - (imm4u + 1)); + int64_t int_min = INT64_MIN >> (64 - (imm4u + 1)); + pd = ps1; + + if (ps1 > int_max) { + pd = int_max; + P_SET_OV(1); + } else if (ps1 < int_min) { + pd = int_min; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sclip32.h b/vendor/riscv-isa-sim/riscv/insns/sclip32.h new file mode 100644 index 00000000..ff1ba287 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sclip32.h @@ -0,0 +1,14 @@ +require_vector_vs; +P_I_LOOP(32, 5, { + int64_t int_max = INT64_MAX >> (64 - (imm5u + 1)); + int64_t int_min = INT64_MIN >> (64 - (imm5u + 1)); + pd = ps1; + + if (ps1 > int_max) { + pd = int_max; + P_SET_OV(1); + } else if (ps1 < int_min) { + pd = int_min; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sclip8.h b/vendor/riscv-isa-sim/riscv/insns/sclip8.h new file mode 100644 index 00000000..afd9c692 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sclip8.h @@ -0,0 +1,14 @@ +require_vector_vs; +P_I_LOOP(8, 3, { + int64_t int_max = INT64_MAX >> (64 - (imm3u + 1)); + int64_t int_min = INT64_MIN >> (64 - (imm3u + 1)); + pd = ps1; + + if (ps1 > int_max) { + pd = int_max; + P_SET_OV(1); + } else if (ps1 < int_min) { + pd = int_min; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/scmple16.h b/vendor/riscv-isa-sim/riscv/insns/scmple16.h new file mode 100644 index 00000000..060c04c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/scmple16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 <= ps2) ? 
-1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/scmple8.h b/vendor/riscv-isa-sim/riscv/insns/scmple8.h new file mode 100644 index 00000000..8920c1f5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/scmple8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 <= ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/scmplt16.h b/vendor/riscv-isa-sim/riscv/insns/scmplt16.h new file mode 100644 index 00000000..db62f6f7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/scmplt16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 < ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/scmplt8.h b/vendor/riscv-isa-sim/riscv/insns/scmplt8.h new file mode 100644 index 00000000..7d072097 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/scmplt8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 < ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sd.h b/vendor/riscv-isa-sim/riscv/insns/sd.h new file mode 100644 index 00000000..664deb2c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sd.h @@ -0,0 +1,2 @@ +require_rv64; +MMU.store_uint64(RS1 + insn.s_imm(), RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/sext_b.h b/vendor/riscv-isa-sim/riscv/insns/sext_b.h new file mode 100644 index 00000000..5acde617 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sext_b.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBB); +WRITE_RD((sreg_t)(int8_t)(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sext_h.h b/vendor/riscv-isa-sim/riscv/insns/sext_h.h new file mode 100644 index 00000000..e89a68d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sext_h.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBB); +WRITE_RD((sreg_t)(int16_t)(RS1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h b/vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h new file mode 100644 index 00000000..f566d632 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sfence_inval_ir.h @@ -0,0 +1,3 @@ +require_extension('S'); +require_extension(EXT_SVINVAL); +require_impl(IMPL_MMU); diff --git a/vendor/riscv-isa-sim/riscv/insns/sfence_vma.h b/vendor/riscv-isa-sim/riscv/insns/sfence_vma.h new file mode 100644 index 00000000..7d6c01a8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sfence_vma.h @@ -0,0 +1,9 @@ +require_extension('S'); +require_impl(IMPL_MMU); +if (STATE.v) { + if (STATE.prv == PRV_U || get_field(STATE.hstatus->read(), HSTATUS_VTVM)) + require_novirt(); +} else { + require_privilege(get_field(STATE.mstatus->read(), MSTATUS_TVM) ? 
PRV_M : PRV_S); +} +MMU.flush_tlb(); diff --git a/vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h b/vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h new file mode 100644 index 00000000..f566d632 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sfence_w_inval.h @@ -0,0 +1,3 @@ +require_extension('S'); +require_extension(EXT_SVINVAL); +require_impl(IMPL_MMU); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh.h b/vendor/riscv-isa-sim/riscv/insns/sh.h new file mode 100644 index 00000000..22aa3a88 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh.h @@ -0,0 +1 @@ +MMU.store_uint16(RS1 + insn.s_imm(), RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh1add.h b/vendor/riscv-isa-sim/riscv/insns/sh1add.h new file mode 100644 index 00000000..6cbc3605 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh1add.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen((RS1 << 1) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h b/vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h new file mode 100644 index 00000000..11770026 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh1add_uw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen((zext32(RS1) << 1) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh2add.h b/vendor/riscv-isa-sim/riscv/insns/sh2add.h new file mode 100644 index 00000000..ea55e79a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh2add.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen((RS1 << 2) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h b/vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h new file mode 100644 index 00000000..b51250d9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh2add_uw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen((zext32(RS1) << 2) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh3add.h b/vendor/riscv-isa-sim/riscv/insns/sh3add.h new file mode 100644 index 00000000..de71f0fa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh3add.h @@ -0,0 +1,2 @@ +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen((RS1 << 3) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h b/vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h new file mode 100644 index 00000000..b618b851 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sh3add_uw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen((zext32(RS1) << 3) + RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sig0.h b/vendor/riscv-isa-sim/riscv/insns/sha256sig0.h new file mode 100644 index 00000000..f86e42f7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha256sig0.h @@ -0,0 +1,13 @@ + +require_extension(EXT_ZKNH); + +#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1)))) + +uint32_t a = RS1; + +WRITE_RD( + sext32(ROR32(a, 7) ^ ROR32(a,18) ^ (a >> 3)) +); + +#undef ROR32 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sig1.h b/vendor/riscv-isa-sim/riscv/insns/sha256sig1.h new file mode 100644 index 00000000..72e586cb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha256sig1.h @@ -0,0 +1,13 @@ + +require_extension(EXT_ZKNH); + +#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1)))) + +uint32_t a = RS1; + +WRITE_RD( + sext32(ROR32(a, 17) ^ ROR32(a,19) ^ (a >> 10)) +); + +#undef ROR32 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sum0.h b/vendor/riscv-isa-sim/riscv/insns/sha256sum0.h new file mode 100644 index 00000000..f0aed47f --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/sha256sum0.h @@ -0,0 +1,13 @@ + +require_extension(EXT_ZKNH); + +#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1)))) + +uint32_t a = RS1; + +WRITE_RD( + sext32(ROR32(a, 2) ^ ROR32(a,13) ^ ROR32(a, 22)) +); + +#undef ROR32 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha256sum1.h b/vendor/riscv-isa-sim/riscv/insns/sha256sum1.h new file mode 100644 index 00000000..41de5afe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha256sum1.h @@ -0,0 +1,13 @@ + +require_extension(EXT_ZKNH); + +#define ROR32(a,amt) ((a << (-amt & (32-1))) | (a >> (amt & (32-1)))) + +uint32_t a = RS1; + +WRITE_RD( + sext32(ROR32(a, 6) ^ ROR32(a,11) ^ ROR32(a, 25)) +); + +#undef ROR32 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig0.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig0.h new file mode 100644 index 00000000..2efd763d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig0.h @@ -0,0 +1,13 @@ +require_rv64; +require_extension(EXT_ZKNH); + +#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1)))) + +uint64_t a = RS1; + +WRITE_RD( + ROR64(a, 1) ^ ROR64(a, 8) ^ (a >> 7) +); + +#undef ROR64 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h new file mode 100644 index 00000000..eb6a2a25 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig0h.h @@ -0,0 +1,9 @@ + +require_rv32; +require_extension(EXT_ZKNH); + +reg_t result = + (zext32(RS1) >> 1) ^ (zext32(RS1) >> 7) ^ (zext32(RS1) >> 8) ^ + (zext32(RS2) << 31) ^ (zext32(RS2) << 24); + +WRITE_RD(sext_xlen(result)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h new file mode 100644 index 00000000..599a6a10 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig0l.h @@ -0,0 +1,9 @@ + +require_rv32; +require_extension(EXT_ZKNH); + +reg_t result = + (zext32(RS1) >> 1) ^ (zext32(RS1) >> 7) ^ (zext32(RS1) >> 8) ^ + (zext32(RS2) << 31) ^ (zext32(RS2) << 25) ^ (zext32(RS2) << 24); + +WRITE_RD(sext_xlen(result)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig1.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig1.h new file mode 100644 index 00000000..21766541 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig1.h @@ -0,0 +1,13 @@ +require_rv64; +require_extension(EXT_ZKNH); + +#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1)))) + +uint64_t a = RS1; + +WRITE_RD( + ROR64(a, 19) ^ ROR64(a,61) ^ (a >> 6) +); + +#undef ROR64 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h new file mode 100644 index 00000000..271a1f90 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig1h.h @@ -0,0 +1,9 @@ + +require_rv32; +require_extension(EXT_ZKNH); + +reg_t result = + (zext32(RS1) << 3) ^ (zext32(RS1) >> 6) ^ (zext32(RS1) >> 19) ^ + (zext32(RS2) >> 29) ^ (zext32(RS2) << 13); + +WRITE_RD(sext_xlen(result)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h b/vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h new file mode 100644 index 00000000..491810d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sig1l.h @@ -0,0 +1,9 @@ + +require_rv32; +require_extension(EXT_ZKNH); + +reg_t result = + (zext32(RS1) << 3) ^ (zext32(RS1) >> 6) ^ (zext32(RS1) >> 19) ^ + (zext32(RS2) >> 29) ^ (zext32(RS2) << 26) ^ (zext32(RS2) << 13); + +WRITE_RD(sext_xlen(result)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum0.h 
b/vendor/riscv-isa-sim/riscv/insns/sha512sum0.h new file mode 100644 index 00000000..01182e67 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum0.h @@ -0,0 +1,13 @@ +require_rv64; +require_extension(EXT_ZKNH); + +#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1)))) + +uint64_t a = RS1; + +WRITE_RD( + ROR64(a, 28) ^ ROR64(a,34) ^ ROR64(a,39) +); + +#undef ROR64 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h new file mode 100644 index 00000000..cb6c636f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum0r.h @@ -0,0 +1,9 @@ + +require_rv32; +require_extension(EXT_ZKNH); + +reg_t result = + (zext32(RS1) << 25) ^ (zext32(RS1) << 30) ^ (zext32(RS1) >> 28) ^ + (zext32(RS2) >> 7) ^ (zext32(RS2) >> 2) ^ (zext32(RS2) << 4); + +WRITE_RD(sext_xlen(result)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum1.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum1.h new file mode 100644 index 00000000..267d7dd9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum1.h @@ -0,0 +1,13 @@ +require_rv64; +require_extension(EXT_ZKNH); + +#define ROR64(a,amt) ((a << (-amt & (64-1))) | (a >> (amt & (64-1)))) + +uint64_t a = RS1; + +WRITE_RD( + ROR64(a, 14) ^ ROR64(a, 18) ^ ROR64(a, 41) +); + +#undef ROR64 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h b/vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h new file mode 100644 index 00000000..8109d0dc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sha512sum1r.h @@ -0,0 +1,9 @@ + +require_rv32; +require_extension(EXT_ZKNH); + +reg_t result = + (zext32(RS1) << 23) ^ (zext32(RS1) >> 14) ^ (zext32(RS1) >> 18) ^ + (zext32(RS2) >> 9) ^ (zext32(RS2) << 18) ^ (zext32(RS2) << 14); + +WRITE_RD(sext_xlen(result)); diff --git a/vendor/riscv-isa-sim/riscv/insns/shfl.h b/vendor/riscv-isa-sim/riscv/insns/shfl.h new file mode 100644 index 00000000..3004871e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/shfl.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & ((xlen-1) >> 1); +if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/shfli.h b/vendor/riscv-isa-sim/riscv/insns/shfli.h new file mode 100644 index 00000000..f8636190 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/shfli.h @@ -0,0 +1,12 @@ +// Zbkb contains zip but not general shfli +require(((insn.rs2() == (xlen / 2 - 1)) && p->extension_enabled(EXT_ZBKB)) + || p->extension_enabled(EXT_XZBP)); +require(SHAMT < (xlen/2)); +reg_t x = RS1; +int shamt = SHAMT & ((xlen-1) >> 1); +if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 2) x 
= (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/shflw.h b/vendor/riscv-isa-sim/riscv/insns/shflw.h new file mode 100644 index 00000000..06ee3604 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/shflw.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 15; +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sinval_vma.h b/vendor/riscv-isa-sim/riscv/insns/sinval_vma.h new file mode 100644 index 00000000..5e431497 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sinval_vma.h @@ -0,0 +1,2 @@ +require_extension(EXT_SVINVAL); +#include "sfence_vma.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/sll.h b/vendor/riscv-isa-sim/riscv/insns/sll.h new file mode 100644 index 00000000..7db76131 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sll.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 << (RS2 & (xlen-1)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sll16.h b/vendor/riscv-isa-sim/riscv/insns/sll16.h new file mode 100644 index 00000000..9659f53e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sll16.h @@ -0,0 +1,3 @@ +P_X_ULOOP(16, 4, { + pd = ps1 << sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sll32.h b/vendor/riscv-isa-sim/riscv/insns/sll32.h new file mode 100644 index 00000000..8a05b39b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sll32.h @@ -0,0 +1,4 @@ +require_rv64; +P_X_ULOOP(32, 5, { + pd = ps1 << sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sll8.h b/vendor/riscv-isa-sim/riscv/insns/sll8.h new file mode 100644 index 00000000..b7f069a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sll8.h @@ -0,0 +1,3 @@ +P_X_ULOOP(8, 3, { + pd = ps1 << sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/slli.h b/vendor/riscv-isa-sim/riscv/insns/slli.h new file mode 100644 index 00000000..26782fda --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slli.h @@ -0,0 +1,2 @@ +require(SHAMT < xlen); +WRITE_RD(sext_xlen(RS1 << SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/slli16.h b/vendor/riscv-isa-sim/riscv/insns/slli16.h new file mode 100644 index 00000000..8d89a61f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slli16.h @@ -0,0 +1,3 @@ +P_I_ULOOP(16, 4, { + pd = ps1 << imm4u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/slli32.h b/vendor/riscv-isa-sim/riscv/insns/slli32.h new file mode 100644 index 00000000..71d880af --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slli32.h @@ -0,0 +1,4 @@ +require_rv64; +P_I_ULOOP(32, 5, { + pd = ps1 << imm5u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/slli8.h b/vendor/riscv-isa-sim/riscv/insns/slli8.h new file mode 100644 index 00000000..c997496f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slli8.h @@ -0,0 +1,3 @@ +P_I_ULOOP(8, 3, { + pd = ps1 << imm3u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/slli_uw.h 
b/vendor/riscv-isa-sim/riscv/insns/slli_uw.h new file mode 100644 index 00000000..9cd48a91 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slli_uw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZBA); +WRITE_RD(sext_xlen(zext32(RS1) << SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/slliw.h b/vendor/riscv-isa-sim/riscv/insns/slliw.h new file mode 100644 index 00000000..c1fda656 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slliw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(RS1 << SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sllw.h b/vendor/riscv-isa-sim/riscv/insns/sllw.h new file mode 100644 index 00000000..affe8944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sllw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(RS1 << (RS2 & 0x1F))); diff --git a/vendor/riscv-isa-sim/riscv/insns/slo.h b/vendor/riscv-isa-sim/riscv/insns/slo.h new file mode 100644 index 00000000..a27ec37e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slo.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(~((~RS1) << (RS2 & (xlen-1))))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sloi.h b/vendor/riscv-isa-sim/riscv/insns/sloi.h new file mode 100644 index 00000000..62278b03 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sloi.h @@ -0,0 +1,3 @@ +require(SHAMT < xlen); +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(~((~RS1) << SHAMT))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sloiw.h b/vendor/riscv-isa-sim/riscv/insns/sloiw.h new file mode 100644 index 00000000..492c94a1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sloiw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(sext32(~((~RS1) << SHAMT))); diff --git a/vendor/riscv-isa-sim/riscv/insns/slow.h b/vendor/riscv-isa-sim/riscv/insns/slow.h new file mode 100644 index 00000000..04c90a45 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slow.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(sext32(~((~RS1) << (RS2 & 0x1F)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/slt.h b/vendor/riscv-isa-sim/riscv/insns/slt.h new file mode 100644 index 00000000..25ccd45e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slt.h @@ -0,0 +1 @@ +WRITE_RD(sreg_t(RS1) < sreg_t(RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/slti.h b/vendor/riscv-isa-sim/riscv/insns/slti.h new file mode 100644 index 00000000..3671d241 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/slti.h @@ -0,0 +1 @@ +WRITE_RD(sreg_t(RS1) < sreg_t(insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/sltiu.h b/vendor/riscv-isa-sim/riscv/insns/sltiu.h new file mode 100644 index 00000000..f3984571 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sltiu.h @@ -0,0 +1 @@ +WRITE_RD(RS1 < reg_t(insn.i_imm())); diff --git a/vendor/riscv-isa-sim/riscv/insns/sltu.h b/vendor/riscv-isa-sim/riscv/insns/sltu.h new file mode 100644 index 00000000..84d97a2a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sltu.h @@ -0,0 +1 @@ +WRITE_RD(RS1 < RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/sm3p0.h b/vendor/riscv-isa-sim/riscv/insns/sm3p0.h new file mode 100644 index 00000000..0a72a930 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sm3p0.h @@ -0,0 +1,14 @@ + +require_extension(EXT_ZKSH); + +#define ROL32(a,amt) ((a >> (-amt & (32-1))) | (a << (amt & (32-1)))) + +uint32_t src = RS1; +uint32_t result = src ^ ROL32(src, 9) ^ ROL32(src, 17); + +WRITE_RD( + sext32(result) +); + +#undef ROL32 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sm3p1.h 
b/vendor/riscv-isa-sim/riscv/insns/sm3p1.h new file mode 100644 index 00000000..ce3e36c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sm3p1.h @@ -0,0 +1,14 @@ + +require_extension(EXT_ZKSH); + +#define ROL32(a,amt) ((a >> (-amt & (32-1))) | (a << (amt & (32-1)))) + +uint32_t src = RS1; +uint32_t result = src ^ ROL32(src, 15) ^ ROL32(src, 23); + +WRITE_RD( + sext32(result) +); + +#undef ROL32 + diff --git a/vendor/riscv-isa-sim/riscv/insns/sm4_common.h b/vendor/riscv-isa-sim/riscv/insns/sm4_common.h new file mode 100644 index 00000000..17f129f0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sm4_common.h @@ -0,0 +1,27 @@ + +// SM4 forward SBox. SM4 has no inverse sbox. +static const uint8_t sm4_sbox[256] = { + 0xD6, 0x90, 0xE9, 0xFE, 0xCC, 0xE1, 0x3D, 0xB7, 0x16, 0xB6, 0x14, 0xC2, + 0x28, 0xFB, 0x2C, 0x05, 0x2B, 0x67, 0x9A, 0x76, 0x2A, 0xBE, 0x04, 0xC3, + 0xAA, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99, 0x9C, 0x42, 0x50, 0xF4, + 0x91, 0xEF, 0x98, 0x7A, 0x33, 0x54, 0x0B, 0x43, 0xED, 0xCF, 0xAC, 0x62, + 0xE4, 0xB3, 0x1C, 0xA9, 0xC9, 0x08, 0xE8, 0x95, 0x80, 0xDF, 0x94, 0xFA, + 0x75, 0x8F, 0x3F, 0xA6, 0x47, 0x07, 0xA7, 0xFC, 0xF3, 0x73, 0x17, 0xBA, + 0x83, 0x59, 0x3C, 0x19, 0xE6, 0x85, 0x4F, 0xA8, 0x68, 0x6B, 0x81, 0xB2, + 0x71, 0x64, 0xDA, 0x8B, 0xF8, 0xEB, 0x0F, 0x4B, 0x70, 0x56, 0x9D, 0x35, + 0x1E, 0x24, 0x0E, 0x5E, 0x63, 0x58, 0xD1, 0xA2, 0x25, 0x22, 0x7C, 0x3B, + 0x01, 0x21, 0x78, 0x87, 0xD4, 0x00, 0x46, 0x57, 0x9F, 0xD3, 0x27, 0x52, + 0x4C, 0x36, 0x02, 0xE7, 0xA0, 0xC4, 0xC8, 0x9E, 0xEA, 0xBF, 0x8A, 0xD2, + 0x40, 0xC7, 0x38, 0xB5, 0xA3, 0xF7, 0xF2, 0xCE, 0xF9, 0x61, 0x15, 0xA1, + 0xE0, 0xAE, 0x5D, 0xA4, 0x9B, 0x34, 0x1A, 0x55, 0xAD, 0x93, 0x32, 0x30, + 0xF5, 0x8C, 0xB1, 0xE3, 0x1D, 0xF6, 0xE2, 0x2E, 0x82, 0x66, 0xCA, 0x60, + 0xC0, 0x29, 0x23, 0xAB, 0x0D, 0x53, 0x4E, 0x6F, 0xD5, 0xDB, 0x37, 0x45, + 0xDE, 0xFD, 0x8E, 0x2F, 0x03, 0xFF, 0x6A, 0x72, 0x6D, 0x6C, 0x5B, 0x51, + 0x8D, 0x1B, 0xAF, 0x92, 0xBB, 0xDD, 0xBC, 0x7F, 0x11, 0xD9, 0x5C, 0x41, + 0x1F, 0x10, 0x5A, 0xD8, 0x0A, 0xC1, 0x31, 0x88, 0xA5, 0xCD, 0x7B, 0xBD, + 0x2D, 0x74, 0xD0, 0x12, 0xB8, 0xE5, 0xB4, 0xB0, 0x89, 0x69, 0x97, 0x4A, + 0x0C, 0x96, 0x77, 0x7E, 0x65, 0xB9, 0xF1, 0x09, 0xC5, 0x6E, 0xC6, 0x84, + 0x18, 0xF0, 0x7D, 0xEC, 0x3A, 0xDC, 0x4D, 0x20, 0x79, 0xEE, 0x5F, 0x3E, + 0xD7, 0xCB, 0x39, 0x48 +}; + diff --git a/vendor/riscv-isa-sim/riscv/insns/sm4ed.h b/vendor/riscv-isa-sim/riscv/insns/sm4ed.h new file mode 100644 index 00000000..a78c1a87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sm4ed.h @@ -0,0 +1,22 @@ + +require_extension(EXT_ZKSED); + +#include "sm4_common.h" + +uint8_t bs = insn.bs(); + +uint32_t sb_in = (RS2 >> (8*bs)) & 0xFF; +uint32_t sb_out = (uint32_t)sm4_sbox[sb_in]; + +uint32_t linear = sb_out ^ (sb_out << 8) ^ + (sb_out << 2) ^ + (sb_out << 18) ^ + ((sb_out & 0x3f) << 26) ^ + ((sb_out & 0xC0) << 10) ; + +uint32_t rotl = (linear << (8*bs)) | (linear >> (32-8*bs)); + +uint32_t result = rotl ^ RS1; + +WRITE_RD(sext32(result)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/sm4ks.h b/vendor/riscv-isa-sim/riscv/insns/sm4ks.h new file mode 100644 index 00000000..c93c97ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sm4ks.h @@ -0,0 +1,20 @@ + +require_extension(EXT_ZKSED); + +#include "sm4_common.h" + +uint8_t bs = insn.bs(); + +uint32_t sb_in = (RS2 >> (8*bs)) & 0xFF; +uint32_t sb_out = sm4_sbox[sb_in]; + +uint32_t x = sb_out ^ + ((sb_out & 0x07) << 29) ^ ((sb_out & 0xFE) << 7) ^ + ((sb_out & 0x01) << 23) ^ ((sb_out & 0xF8) << 13) ; + +uint32_t rotl = (x << (8*bs)) | (x >> (32-8*bs)); + 
+uint32_t result = rotl ^ RS1; + +WRITE_RD(sext32(result)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/smal.h b/vendor/riscv-isa-sim/riscv/insns/smal.h new file mode 100644 index 00000000..09b818d7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smal.h @@ -0,0 +1,11 @@ +require_extension(EXT_ZPSFOPERAND); +sreg_t res = 0; +if (xlen == 32) { + res = RS1_PAIR; + res += sext_xlen(P_SH(RS2, 0) * P_SH(RS2, 1)); + WRITE_RD_PAIR(res); +} else { + res = sext_xlen(P_SH(RS2, 0) * P_SH(RS2, 1)) + + sext_xlen(P_SH(RS2, 2) * P_SH(RS2, 3)) + RS1; + WRITE_RD(res); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/smalbb.h b/vendor/riscv-isa-sim/riscv/insns/smalbb.h new file mode 100644 index 00000000..9a2e7994 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalbb.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 0); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalbt.h b/vendor/riscv-isa-sim/riscv/insns/smalbt.h new file mode 100644 index 00000000..42cf71cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalbt.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalda.h b/vendor/riscv-isa-sim/riscv/insns/smalda.h new file mode 100644 index 00000000..8c067939 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalda.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(16, { + rd += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaldrs.h b/vendor/riscv-isa-sim/riscv/insns/smaldrs.h new file mode 100644 index 00000000..84e17699 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaldrs.h @@ -0,0 +1,7 @@ +P_64_PROFILE_REDUCTION(16, { + if (i & 1) { + rd -= ps1 * ps2; + } else { + rd += ps1 * ps2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalds.h b/vendor/riscv-isa-sim/riscv/insns/smalds.h new file mode 100644 index 00000000..e3cfbd7c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalds.h @@ -0,0 +1,7 @@ +P_64_PROFILE_REDUCTION(16, { + if (i & 1) { + rd += ps1 * ps2; + } else { + rd -= ps1 * ps2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaltt.h b/vendor/riscv-isa-sim/riscv/insns/smaltt.h new file mode 100644 index 00000000..1e654a06 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaltt.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += P_SH(ps1, 1) * P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalxda.h b/vendor/riscv-isa-sim/riscv/insns/smalxda.h new file mode 100644 index 00000000..77675ee9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalxda.h @@ -0,0 +1,4 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); + rd += (sreg_t)P_SH(ps1, 1) * (sreg_t)P_SH(ps2, 0); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smalxds.h b/vendor/riscv-isa-sim/riscv/insns/smalxds.h new file mode 100644 index 00000000..2ae7eb53 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smalxds.h @@ -0,0 +1,4 @@ +P_64_PROFILE_REDUCTION(32, { + rd += (sreg_t)P_SH(ps1, 1) * (sreg_t)P_SH(ps2, 0); + rd -= (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaqa.h b/vendor/riscv-isa-sim/riscv/insns/smaqa.h new file mode 100644 index 00000000..83dda84f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaqa.h @@ -0,0 +1,3 @@ +P_REDUCTION_LOOP(32, 8, true, false, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smaqa_su.h b/vendor/riscv-isa-sim/riscv/insns/smaqa_su.h new file mode 100644 
index 00000000..4ee0eb78 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smaqa_su.h @@ -0,0 +1,3 @@ +P_REDUCTION_SULOOP(32, 8, true, false, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smar64.h b/vendor/riscv-isa-sim/riscv/insns/smar64.h new file mode 100644 index 00000000..5c5da771 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smar64.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smax16.h b/vendor/riscv-isa-sim/riscv/insns/smax16.h new file mode 100644 index 00000000..083d63ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smax16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smax32.h b/vendor/riscv-isa-sim/riscv/insns/smax32.h new file mode 100644 index 00000000..6563cfc1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smax32.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smax8.h b/vendor/riscv-isa-sim/riscv/insns/smax8.h new file mode 100644 index 00000000..773039e3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smax8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smbb16.h b/vendor/riscv-isa-sim/riscv/insns/smbb16.h new file mode 100644 index 00000000..0813bfbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smbb16.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = P_SH(ps1, 0) * P_SH(ps2, 0); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smbt16.h b/vendor/riscv-isa-sim/riscv/insns/smbt16.h new file mode 100644 index 00000000..953b3a62 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smbt16.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = P_SH(ps1, 0) * P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smbt32.h b/vendor/riscv-isa-sim/riscv/insns/smbt32.h new file mode 100644 index 00000000..35059ad3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smbt32.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZPN); +WRITE_RD((sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/smdrs.h b/vendor/riscv-isa-sim/riscv/insns/smdrs.h new file mode 100644 index 00000000..8f47f7d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smdrs.h @@ -0,0 +1,6 @@ +P_REDUCTION_LOOP(32, 16, false, false, { + if (j & 1) + pd_res -= ps1 * ps2; + else + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smdrs32.h b/vendor/riscv-isa-sim/riscv/insns/smdrs32.h new file mode 100644 index 00000000..c397013d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smdrs32.h @@ -0,0 +1,7 @@ +require_rv64; +require_extension(EXT_ZPN); + +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD(mres0 - mres1); diff --git a/vendor/riscv-isa-sim/riscv/insns/smds.h b/vendor/riscv-isa-sim/riscv/insns/smds.h new file mode 100644 index 00000000..248679a5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smds.h @@ -0,0 +1,6 @@ +P_REDUCTION_LOOP(32, 16, false, false, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smds32.h b/vendor/riscv-isa-sim/riscv/insns/smds32.h new file mode 100644 index 00000000..e7fdeedd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smds32.h @@ -0,0 +1,7 @@ +require_rv64; +require_extension(EXT_ZPN); + +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 0); +sreg_t mres1 = 
(sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1); + +WRITE_RD(mres1 - mres0); diff --git a/vendor/riscv-isa-sim/riscv/insns/smin16.h b/vendor/riscv-isa-sim/riscv/insns/smin16.h new file mode 100644 index 00000000..afb1bb3b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smin16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smin32.h b/vendor/riscv-isa-sim/riscv/insns/smin32.h new file mode 100644 index 00000000..22847cb4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smin32.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smin8.h b/vendor/riscv-isa-sim/riscv/insns/smin8.h new file mode 100644 index 00000000..084e0e66 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smin8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmul.h b/vendor/riscv-isa-sim/riscv/insns/smmul.h new file mode 100644 index 00000000..df0dd239 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmul.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + pd = mres >> 32; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmul_u.h b/vendor/riscv-isa-sim/riscv/insns/smmul_u.h new file mode 100644 index 00000000..55fa617f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmul_u.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int64_t) ps2; + pd = ((mres >> 31) + 1) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwb.h b/vendor/riscv-isa-sim/riscv/insns/smmwb.h new file mode 100644 index 00000000..f94aa9c0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwb.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 0); + pd = mres >> 16; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwb_u.h b/vendor/riscv-isa-sim/riscv/insns/smmwb_u.h new file mode 100644 index 00000000..47c6e362 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwb_u.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 0); + pd = ((mres >> 15) + 1) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwt.h b/vendor/riscv-isa-sim/riscv/insns/smmwt.h new file mode 100644 index 00000000..d8cf439f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwt.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 1); + pd = mres >> 16; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smmwt_u.h b/vendor/riscv-isa-sim/riscv/insns/smmwt_u.h new file mode 100644 index 00000000..5c5a671f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smmwt_u.h @@ -0,0 +1,4 @@ +P_LOOP(32, { + int64_t mres = (int64_t) ps1 * (int16_t) P_H(ps2, 1); + pd = ((mres >> 15) + 1) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smslda.h b/vendor/riscv-isa-sim/riscv/insns/smslda.h new file mode 100644 index 00000000..f8389609 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smslda.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(16, { + rd -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smslxda.h b/vendor/riscv-isa-sim/riscv/insns/smslxda.h new file mode 100644 index 00000000..7e25f9b8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smslxda.h @@ -0,0 +1,4 @@ +P_64_PROFILE_REDUCTION(32, { + rd -= (sreg_t)P_SH(ps1, 1) * (sreg_t)P_SH(ps2, 0); + rd -= (sreg_t)P_SH(ps1, 0) * (sreg_t)P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smsr64.h b/vendor/riscv-isa-sim/riscv/insns/smsr64.h new file mode 
100644 index 00000000..a43559fd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smsr64.h @@ -0,0 +1,3 @@ +P_64_PROFILE_REDUCTION(32, { + rd -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smtt16.h b/vendor/riscv-isa-sim/riscv/insns/smtt16.h new file mode 100644 index 00000000..e19c50a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smtt16.h @@ -0,0 +1,3 @@ +P_LOOP(32, { + pd = P_SH(ps1, 1) * P_SH(ps2, 1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smtt32.h b/vendor/riscv-isa-sim/riscv/insns/smtt32.h new file mode 100644 index 00000000..c7fd9e71 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smtt32.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_ZPN); +WRITE_RD((sreg_t)P_SW(RS1, 1) * P_SW(RS2, 1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/smul16.h b/vendor/riscv-isa-sim/riscv/insns/smul16.h new file mode 100644 index 00000000..8f87612d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smul16.h @@ -0,0 +1,3 @@ +P_MUL_LOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smul8.h b/vendor/riscv-isa-sim/riscv/insns/smul8.h new file mode 100644 index 00000000..155e50e0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smul8.h @@ -0,0 +1,3 @@ +P_MUL_LOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smulx16.h b/vendor/riscv-isa-sim/riscv/insns/smulx16.h new file mode 100644 index 00000000..14ae047f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smulx16.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_LOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smulx8.h b/vendor/riscv-isa-sim/riscv/insns/smulx8.h new file mode 100644 index 00000000..b5ae41ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smulx8.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_LOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smxds.h b/vendor/riscv-isa-sim/riscv/insns/smxds.h new file mode 100644 index 00000000..845d01f6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smxds.h @@ -0,0 +1,6 @@ +P_REDUCTION_CROSS_LOOP(32, 16, false, false, { + if (j & 1) + pd_res += ps1 * ps2; + else + pd_res -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/smxds32.h b/vendor/riscv-isa-sim/riscv/insns/smxds32.h new file mode 100644 index 00000000..8eeedcf9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/smxds32.h @@ -0,0 +1,7 @@ +require_rv64; +require_extension(EXT_ZPN); + +sreg_t mres0 = (sreg_t)P_SW(RS1, 0) * P_SW(RS2, 1); +sreg_t mres1 = (sreg_t)P_SW(RS1, 1) * P_SW(RS2, 0); + +WRITE_RD(mres1 - mres0); diff --git a/vendor/riscv-isa-sim/riscv/insns/sra.h b/vendor/riscv-isa-sim/riscv/insns/sra.h new file mode 100644 index 00000000..403b9b73 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(sext_xlen(RS1) >> (RS2 & (xlen-1)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sra16.h b/vendor/riscv-isa-sim/riscv/insns/sra16.h new file mode 100644 index 00000000..84a40fb5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra16.h @@ -0,0 +1,3 @@ +P_X_LOOP(16, 4, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra16_u.h b/vendor/riscv-isa-sim/riscv/insns/sra16_u.h new file mode 100644 index 00000000..c28178e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra16_u.h @@ -0,0 +1,6 @@ +P_X_LOOP(16, 4, { + if(sa > 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra32.h b/vendor/riscv-isa-sim/riscv/insns/sra32.h new file mode 100644 index 00000000..8b192e0c 
--- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra32.h @@ -0,0 +1,4 @@ +require_rv64; +P_X_LOOP(32, 5, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra32_u.h b/vendor/riscv-isa-sim/riscv/insns/sra32_u.h new file mode 100644 index 00000000..e062a886 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra32_u.h @@ -0,0 +1,7 @@ +require_rv64; +P_X_LOOP(32, 5, { + if(sa > 0) + pd = (((uint64_t)(ps1 >> (sa - 1))) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra8.h b/vendor/riscv-isa-sim/riscv/insns/sra8.h new file mode 100644 index 00000000..de1bd64e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra8.h @@ -0,0 +1,3 @@ +P_X_LOOP(8, 3, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra8_u.h b/vendor/riscv-isa-sim/riscv/insns/sra8_u.h new file mode 100644 index 00000000..7061fc48 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra8_u.h @@ -0,0 +1,6 @@ +P_X_LOOP(8, 3, { + if(sa > 0) + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sra_u.h b/vendor/riscv-isa-sim/riscv/insns/sra_u.h new file mode 100644 index 00000000..d7c395b0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sra_u.h @@ -0,0 +1,9 @@ +require_extension(EXT_ZPN); +sreg_t rs1 = sext_xlen(RS1); +reg_t sa = get_field(RS2, make_mask64(0, xlen == 32 ? 5 : 6)); + +if (sa > 0) { + WRITE_RD(sext_xlen(((rs1 >> (sa - 1)) + 1) >> 1)); +} else { + WRITE_RD(sext_xlen(rs1)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/srai.h b/vendor/riscv-isa-sim/riscv/insns/srai.h new file mode 100644 index 00000000..7ae1d4e5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai.h @@ -0,0 +1,2 @@ +require(SHAMT < xlen); +WRITE_RD(sext_xlen(sext_xlen(RS1) >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/srai16.h b/vendor/riscv-isa-sim/riscv/insns/srai16.h new file mode 100644 index 00000000..63f98073 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai16.h @@ -0,0 +1,3 @@ +P_I_LOOP(16, 4, { + pd = ps1 >> imm4u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai16_u.h b/vendor/riscv-isa-sim/riscv/insns/srai16_u.h new file mode 100644 index 00000000..d7835817 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai16_u.h @@ -0,0 +1,6 @@ +P_I_LOOP(16, 4, { + if (imm4u > 0) + pd = ((ps1 >> (imm4u - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai32.h b/vendor/riscv-isa-sim/riscv/insns/srai32.h new file mode 100644 index 00000000..9058ba9d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai32.h @@ -0,0 +1,4 @@ +require_rv64; +P_I_LOOP(32, 5, { + pd = ps1 >> imm5u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai32_u.h b/vendor/riscv-isa-sim/riscv/insns/srai32_u.h new file mode 100644 index 00000000..a5fe4d3d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai32_u.h @@ -0,0 +1,7 @@ +require_rv64; +P_I_LOOP(32, 5, { + if (imm5u > 0) + pd = (((uint64_t)(ps1 >> (imm5u - 1))) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai8.h b/vendor/riscv-isa-sim/riscv/insns/srai8.h new file mode 100644 index 00000000..0141933e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai8.h @@ -0,0 +1,3 @@ +P_I_LOOP(8, 3, { + pd = ps1 >> imm3u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai8_u.h b/vendor/riscv-isa-sim/riscv/insns/srai8_u.h new file mode 100644 index 00000000..be7bfaf7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai8_u.h @@ -0,0 +1,6 @@ +P_I_LOOP(8, 3, { + if (imm3u > 0) + pd = 
((ps1 >> (imm3u - 1)) + 1) >> 1; + else + pd = ps1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srai_u.h b/vendor/riscv-isa-sim/riscv/insns/srai_u.h new file mode 100644 index 00000000..f170083e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srai_u.h @@ -0,0 +1,10 @@ +require_extension(EXT_ZPN); +sreg_t rs1 = sext_xlen(RS1); +reg_t sa = insn.p_imm6(); +require(sa < (unsigned long)xlen); // imm[5] == 1 is illegal on rv32 + +if (sa > 0) { + WRITE_RD(sext_xlen(((rs1 >> (sa - 1)) + 1) >> 1)); +} else { + WRITE_RD(sext_xlen(rs1)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/sraiw.h b/vendor/riscv-isa-sim/riscv/insns/sraiw.h new file mode 100644 index 00000000..b344459b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sraiw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(int32_t(RS1) >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sraiw_u.h b/vendor/riscv-isa-sim/riscv/insns/sraiw_u.h new file mode 100644 index 00000000..3559d7fa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sraiw_u.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_ZPN); + +reg_t sa = insn.p_imm5(); +if (sa != 0) { + WRITE_RD(sext32(((P_SW(RS1, 0) >> (sa - 1)) + 1) >> 1)); +} else { + WRITE_RD(sext32(P_SW(RS1, 0))); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/sraw.h b/vendor/riscv-isa-sim/riscv/insns/sraw.h new file mode 100644 index 00000000..ca9c0c76 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sraw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32(int32_t(RS1) >> (RS2 & 0x1F))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sret.h b/vendor/riscv-isa-sim/riscv/insns/sret.h new file mode 100644 index 00000000..5102c15c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sret.h @@ -0,0 +1,27 @@ +require_extension('S'); +reg_t prev_hstatus = STATE.hstatus->read(); +if (STATE.v) { + if (STATE.prv == PRV_U || get_field(prev_hstatus, HSTATUS_VTSR)) + require_novirt(); +} else { + require_privilege(get_field(STATE.mstatus->read(), MSTATUS_TSR) ? 
PRV_M : PRV_S); +} +reg_t next_pc = p->get_state()->sepc->read(); +set_pc_and_serialize(next_pc); +reg_t s = STATE.sstatus->read(); +reg_t prev_prv = get_field(s, MSTATUS_SPP); +s = set_field(s, MSTATUS_SIE, get_field(s, MSTATUS_SPIE)); +s = set_field(s, MSTATUS_SPIE, 1); +s = set_field(s, MSTATUS_SPP, PRV_U); +STATE.sstatus->write(s); +p->set_privilege(prev_prv); +if (!STATE.v) { + if (p->extension_enabled('H')) { + reg_t prev_virt = get_field(prev_hstatus, HSTATUS_SPV); + p->set_virt(prev_virt); + reg_t new_hstatus = set_field(prev_hstatus, HSTATUS_SPV, 0); + STATE.hstatus->write(new_hstatus); + } + + STATE.mstatus->write(set_field(STATE.mstatus->read(), MSTATUS_MPRV, 0)); +} diff --git a/vendor/riscv-isa-sim/riscv/insns/srl.h b/vendor/riscv-isa-sim/riscv/insns/srl.h new file mode 100644 index 00000000..0dabe9ec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(zext_xlen(RS1) >> (RS2 & (xlen-1)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/srl16.h b/vendor/riscv-isa-sim/riscv/insns/srl16.h new file mode 100644 index 00000000..35f9cecb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl16.h @@ -0,0 +1,3 @@ +P_X_ULOOP(16, 4, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl16_u.h b/vendor/riscv-isa-sim/riscv/insns/srl16_u.h new file mode 100644 index 00000000..17d1bc00 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl16_u.h @@ -0,0 +1,7 @@ +P_X_ULOOP(16, 4, { + if (sa > 0) { + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl32.h b/vendor/riscv-isa-sim/riscv/insns/srl32.h new file mode 100644 index 00000000..2ad116cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl32.h @@ -0,0 +1,4 @@ +require_rv64; +P_X_ULOOP(32, 5, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl32_u.h b/vendor/riscv-isa-sim/riscv/insns/srl32_u.h new file mode 100644 index 00000000..d6375469 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl32_u.h @@ -0,0 +1,8 @@ +require_rv64; +P_X_ULOOP(32, 5, { + if (sa > 0) { + pd = (((uint64_t)(ps1 >> (sa - 1))) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl8.h b/vendor/riscv-isa-sim/riscv/insns/srl8.h new file mode 100644 index 00000000..f7d74a94 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl8.h @@ -0,0 +1,3 @@ +P_X_ULOOP(8, 3, { + pd = ps1 >> sa; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srl8_u.h b/vendor/riscv-isa-sim/riscv/insns/srl8_u.h new file mode 100644 index 00000000..26415a56 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srl8_u.h @@ -0,0 +1,7 @@ +P_X_ULOOP(8, 3, { + if (sa > 0) { + pd = ((ps1 >> (sa - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli.h b/vendor/riscv-isa-sim/riscv/insns/srli.h new file mode 100644 index 00000000..ea0b40d6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli.h @@ -0,0 +1,2 @@ +require(SHAMT < xlen); +WRITE_RD(sext_xlen(zext_xlen(RS1) >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/srli16.h b/vendor/riscv-isa-sim/riscv/insns/srli16.h new file mode 100644 index 00000000..cbd685ff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli16.h @@ -0,0 +1,3 @@ +P_I_ULOOP(16, 4, { + pd = ps1 >> imm4u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli16_u.h b/vendor/riscv-isa-sim/riscv/insns/srli16_u.h new file mode 100644 index 00000000..2ba533a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli16_u.h @@ -0,0 +1,7 @@ 
+P_I_ULOOP(16, 4, { + if (imm4u > 0) { + pd = ((ps1 >> (imm4u - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli32.h b/vendor/riscv-isa-sim/riscv/insns/srli32.h new file mode 100644 index 00000000..f3d53af4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli32.h @@ -0,0 +1,4 @@ +require_rv64; +P_I_ULOOP(32, 5, { + pd = ps1 >> imm5u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli32_u.h b/vendor/riscv-isa-sim/riscv/insns/srli32_u.h new file mode 100644 index 00000000..6d2327fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli32_u.h @@ -0,0 +1,8 @@ +require_rv64; +P_I_ULOOP(32, 5, { + if (imm5u > 0) { + pd = (((uint64_t)(ps1 >> (imm5u - 1))) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli8.h b/vendor/riscv-isa-sim/riscv/insns/srli8.h new file mode 100644 index 00000000..103f0ed2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli8.h @@ -0,0 +1,3 @@ +P_I_ULOOP(8, 3, { + pd = ps1 >> imm3u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srli8_u.h b/vendor/riscv-isa-sim/riscv/insns/srli8_u.h new file mode 100644 index 00000000..9fa7f8c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srli8_u.h @@ -0,0 +1,7 @@ +P_I_ULOOP(8, 3, { + if (imm3u > 0) { + pd = ((ps1 >> (imm3u - 1)) + 1) >> 1; + } else { + pd = ps1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/srliw.h b/vendor/riscv-isa-sim/riscv/insns/srliw.h new file mode 100644 index 00000000..c657d3da --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srliw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32((uint32_t)RS1 >> SHAMT)); diff --git a/vendor/riscv-isa-sim/riscv/insns/srlw.h b/vendor/riscv-isa-sim/riscv/insns/srlw.h new file mode 100644 index 00000000..a8eb4519 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srlw.h @@ -0,0 +1,2 @@ +require_rv64; +WRITE_RD(sext32((uint32_t)RS1 >> (RS2 & 0x1F))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sro.h b/vendor/riscv-isa-sim/riscv/insns/sro.h new file mode 100644 index 00000000..3ac050da --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sro.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(~((zext_xlen(~RS1)) >> (RS2 & (xlen-1))))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sroi.h b/vendor/riscv-isa-sim/riscv/insns/sroi.h new file mode 100644 index 00000000..e8788928 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sroi.h @@ -0,0 +1,3 @@ +require(SHAMT < xlen); +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(~((zext_xlen(~RS1)) >> SHAMT))); diff --git a/vendor/riscv-isa-sim/riscv/insns/sroiw.h b/vendor/riscv-isa-sim/riscv/insns/sroiw.h new file mode 100644 index 00000000..83480705 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sroiw.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(sext32(~((~(uint32_t)RS1) >> SHAMT))); diff --git a/vendor/riscv-isa-sim/riscv/insns/srow.h b/vendor/riscv-isa-sim/riscv/insns/srow.h new file mode 100644 index 00000000..808af8db --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/srow.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(sext32(~((~(uint32_t)RS1) >> (RS2 & 0x1F)))); diff --git a/vendor/riscv-isa-sim/riscv/insns/stas16.h b/vendor/riscv-isa-sim/riscv/insns/stas16.h new file mode 100644 index 00000000..949e5c85 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stas16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_LOOP(16, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/stas32.h 
b/vendor/riscv-isa-sim/riscv/insns/stas32.h new file mode 100644 index 00000000..2009a693 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stas32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_LOOP(32, { + pd = ps1 + ps2; +}, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/stsa16.h b/vendor/riscv-isa-sim/riscv/insns/stsa16.h new file mode 100644 index 00000000..7e4371d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stsa16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_LOOP(16, { + pd = ps1 - ps2; +}, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/stsa32.h b/vendor/riscv-isa-sim/riscv/insns/stsa32.h new file mode 100644 index 00000000..e2d81b70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/stsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_LOOP(32, { + pd = ps1 - ps2; +}, { + pd = ps1 + ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub.h b/vendor/riscv-isa-sim/riscv/insns/sub.h new file mode 100644 index 00000000..9ed48f74 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub.h @@ -0,0 +1 @@ +WRITE_RD(sext_xlen(RS1 - RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/sub16.h b/vendor/riscv-isa-sim/riscv/insns/sub16.h new file mode 100644 index 00000000..5d36aaf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub16.h @@ -0,0 +1,3 @@ +P_LOOP(16, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub32.h b/vendor/riscv-isa-sim/riscv/insns/sub32.h new file mode 100644 index 00000000..70bbc53c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub32.h @@ -0,0 +1,4 @@ +require_rv64; +P_LOOP(32, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub64.h b/vendor/riscv-isa-sim/riscv/insns/sub64.h new file mode 100644 index 00000000..d9775264 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub64.h @@ -0,0 +1,3 @@ +P_64_PROFILE({ + rd = rs1 - rs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/sub8.h b/vendor/riscv-isa-sim/riscv/insns/sub8.h new file mode 100644 index 00000000..7f13d615 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sub8.h @@ -0,0 +1,3 @@ +P_LOOP(8, { + pd = ps1 - ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/subw.h b/vendor/riscv-isa-sim/riscv/insns/subw.h new file mode 100644 index 00000000..b4168efe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/subw.h @@ -0,0 +1,3 @@ +require_rv64; +WRITE_RD(sext32(RS1 - RS2)); + diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd810.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd810.h new file mode 100644 index 00000000..2a2f7c32 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd810.h @@ -0,0 +1 @@ +P_SUNPKD8(1, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd820.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd820.h new file mode 100644 index 00000000..84d5248b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd820.h @@ -0,0 +1 @@ +P_SUNPKD8(2, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd830.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd830.h new file mode 100644 index 00000000..88179075 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd830.h @@ -0,0 +1 @@ +P_SUNPKD8(3, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd831.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd831.h new file mode 100644 index 00000000..98ed748d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sunpkd831.h @@ -0,0 +1 @@ +P_SUNPKD8(3, 1) diff --git a/vendor/riscv-isa-sim/riscv/insns/sunpkd832.h b/vendor/riscv-isa-sim/riscv/insns/sunpkd832.h new file mode 100644 index 00000000..b0ac29f9 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/sunpkd832.h @@ -0,0 +1 @@ +P_SUNPKD8(3, 2) diff --git a/vendor/riscv-isa-sim/riscv/insns/sw.h b/vendor/riscv-isa-sim/riscv/insns/sw.h new file mode 100644 index 00000000..aa5ead37 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/sw.h @@ -0,0 +1 @@ +MMU.store_uint32(RS1 + insn.s_imm(), RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/uclip16.h b/vendor/riscv-isa-sim/riscv/insns/uclip16.h new file mode 100644 index 00000000..4cc25190 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uclip16.h @@ -0,0 +1,13 @@ +require_vector_vs; +P_I_LOOP(16, 4, { + int64_t uint_max = imm4u ? UINT64_MAX >> (64 - imm4u) : 0; + pd = ps1; + + if (ps1 > uint_max) { + pd = uint_max; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = 0; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uclip32.h b/vendor/riscv-isa-sim/riscv/insns/uclip32.h new file mode 100644 index 00000000..d347650c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uclip32.h @@ -0,0 +1,13 @@ +require_vector_vs; +P_I_LOOP(32, 5, { + int64_t uint_max = imm5u ? UINT64_MAX >> (64 - imm5u) : 0; + pd = ps1; + + if (ps1 > uint_max) { + pd = uint_max; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = 0; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uclip8.h b/vendor/riscv-isa-sim/riscv/insns/uclip8.h new file mode 100644 index 00000000..b8a95c07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uclip8.h @@ -0,0 +1,13 @@ +require_vector_vs; +P_I_LOOP(8, 3, { + int64_t uint_max = imm3u ? UINT64_MAX >> (64 - imm3u) : 0; + pd = ps1; + + if (ps1 > uint_max) { + pd = uint_max; + P_SET_OV(1); + } else if (ps1 < 0) { + pd = 0; + P_SET_OV(1); + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmple16.h b/vendor/riscv-isa-sim/riscv/insns/ucmple16.h new file mode 100644 index 00000000..fe2b93fb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmple16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 <= ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmple8.h b/vendor/riscv-isa-sim/riscv/insns/ucmple8.h new file mode 100644 index 00000000..bd44cb0e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmple8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 <= ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmplt16.h b/vendor/riscv-isa-sim/riscv/insns/ucmplt16.h new file mode 100644 index 00000000..fa7512c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmplt16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 < ps2) ? -1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ucmplt8.h b/vendor/riscv-isa-sim/riscv/insns/ucmplt8.h new file mode 100644 index 00000000..6fa85b1a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ucmplt8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 < ps2) ? 
-1 : 0; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd16.h b/vendor/riscv-isa-sim/riscv/insns/ukadd16.h new file mode 100644 index 00000000..680b5b63 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(16, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd32.h b/vendor/riscv-isa-sim/riscv/insns/ukadd32.h new file mode 100644 index 00000000..dd836c1e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_ULOOP(32, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd64.h b/vendor/riscv-isa-sim/riscv/insns/ukadd64.h new file mode 100644 index 00000000..d7e98f34 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE({ + bool sat = false; + rd = (sat_addu(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukadd8.h b/vendor/riscv-isa-sim/riscv/insns/ukadd8.h new file mode 100644 index 00000000..4bcada20 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukadd8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(8, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukaddh.h b/vendor/riscv-isa-sim/riscv/insns/ukaddh.h new file mode 100644 index 00000000..30c970a3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukaddh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_H(RS1, 0) + (sreg_t)P_H(RS2, 0); +P_SATU(res, 16); +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ukaddw.h b/vendor/riscv-isa-sim/riscv/insns/ukaddw.h new file mode 100644 index 00000000..5d4d91ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukaddw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_W(RS1, 0) + (sreg_t)P_W(RS2, 0); +P_SATU(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcras16.h b/vendor/riscv-isa-sim/riscv/insns/ukcras16.h new file mode 100644 index 00000000..54b9a104 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcras16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcras32.h b/vendor/riscv-isa-sim/riscv/insns/ukcras32.h new file mode 100644 index 00000000..001644c2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcras32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h new file mode 100644 index 00000000..343063ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcrsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_CROSS_ULOOP(16, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h new file mode 100644 index 
00000000..260f181e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukcrsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_CROSS_ULOOP(32, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukmar64.h b/vendor/riscv-isa-sim/riscv/insns/ukmar64.h new file mode 100644 index 00000000..e33ad7d8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukmar64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE_REDUCTION(32, { + bool sat = false; + rd = (sat_addu(rd, ps1 * ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukmsr64.h b/vendor/riscv-isa-sim/riscv/insns/ukmsr64.h new file mode 100644 index 00000000..9a38cb07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukmsr64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE_REDUCTION(32, { + bool sat = false; + rd = (sat_subu(rd, ps1 * ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstas16.h b/vendor/riscv-isa-sim/riscv/insns/ukstas16.h new file mode 100644 index 00000000..a8203e43 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstas16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_STRAIGHT_ULOOP(16, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstas32.h b/vendor/riscv-isa-sim/riscv/insns/ukstas32.h new file mode 100644 index 00000000..c734eb6c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstas32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_STRAIGHT_ULOOP(32, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstsa16.h b/vendor/riscv-isa-sim/riscv/insns/ukstsa16.h new file mode 100644 index 00000000..81451112 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstsa16.h @@ -0,0 +1,10 @@ +require_vector_vs; +P_STRAIGHT_ULOOP(16, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ukstsa32.h b/vendor/riscv-isa-sim/riscv/insns/ukstsa32.h new file mode 100644 index 00000000..9eb713ec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ukstsa32.h @@ -0,0 +1,11 @@ +require_vector_vs; +require_rv64; +P_STRAIGHT_ULOOP(32, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}, { + bool sat = false; + pd = (sat_addu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub16.h b/vendor/riscv-isa-sim/riscv/insns/uksub16.h new file mode 100644 index 00000000..7fba16e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub16.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(16, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub32.h b/vendor/riscv-isa-sim/riscv/insns/uksub32.h new file mode 100644 index 00000000..3d4913bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub32.h @@ -0,0 +1,7 @@ +require_vector_vs; +require_rv64; +P_ULOOP(32, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub64.h b/vendor/riscv-isa-sim/riscv/insns/uksub64.h new file 
mode 100644 index 00000000..0d2bb050 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub64.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_64_UPROFILE({ + bool sat = false; + rd = (sat_subu(rs1, rs2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksub8.h b/vendor/riscv-isa-sim/riscv/insns/uksub8.h new file mode 100644 index 00000000..f26621e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksub8.h @@ -0,0 +1,6 @@ +require_vector_vs; +P_ULOOP(8, { + bool sat = false; + pd = (sat_subu(ps1, ps2, sat)); + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uksubh.h b/vendor/riscv-isa-sim/riscv/insns/uksubh.h new file mode 100644 index 00000000..ac239895 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksubh.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_H(RS1, 0) - (sreg_t)P_H(RS2, 0); +P_SATU(res, 16); +WRITE_RD(sext_xlen((int16_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/uksubw.h b/vendor/riscv-isa-sim/riscv/insns/uksubw.h new file mode 100644 index 00000000..41a32e70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uksubw.h @@ -0,0 +1,5 @@ +require_vector_vs; +require_extension(EXT_ZPN); +sreg_t res = (sreg_t)P_W(RS1, 0) - (sreg_t)P_W(RS2, 0); +P_SATU(res, 32); +WRITE_RD(sext32(res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/umaqa.h b/vendor/riscv-isa-sim/riscv/insns/umaqa.h new file mode 100644 index 00000000..474b174a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umaqa.h @@ -0,0 +1,3 @@ +P_REDUCTION_ULOOP(32, 8, true, false, { + pd_res += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umar64.h b/vendor/riscv-isa-sim/riscv/insns/umar64.h new file mode 100644 index 00000000..0a8a3524 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umar64.h @@ -0,0 +1,3 @@ +P_64_UPROFILE_REDUCTION(32, { + rd += ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umax16.h b/vendor/riscv-isa-sim/riscv/insns/umax16.h new file mode 100644 index 00000000..e60ed4ac --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umax16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umax32.h b/vendor/riscv-isa-sim/riscv/insns/umax32.h new file mode 100644 index 00000000..6156345c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umax32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umax8.h b/vendor/riscv-isa-sim/riscv/insns/umax8.h new file mode 100644 index 00000000..8a575a68 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umax8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 > ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umin16.h b/vendor/riscv-isa-sim/riscv/insns/umin16.h new file mode 100644 index 00000000..d4142118 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umin16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umin32.h b/vendor/riscv-isa-sim/riscv/insns/umin32.h new file mode 100644 index 00000000..96699452 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umin32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = (ps1 < ps2) ? ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umin8.h b/vendor/riscv-isa-sim/riscv/insns/umin8.h new file mode 100644 index 00000000..bea8ccd2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umin8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 < ps2) ? 
ps1 : ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umsr64.h b/vendor/riscv-isa-sim/riscv/insns/umsr64.h new file mode 100644 index 00000000..0e186d96 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umsr64.h @@ -0,0 +1,3 @@ +P_64_UPROFILE_REDUCTION(32, { + rd -= ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umul16.h b/vendor/riscv-isa-sim/riscv/insns/umul16.h new file mode 100644 index 00000000..860f9420 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umul16.h @@ -0,0 +1,3 @@ +P_MUL_ULOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umul8.h b/vendor/riscv-isa-sim/riscv/insns/umul8.h new file mode 100644 index 00000000..04d7a6ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umul8.h @@ -0,0 +1,3 @@ +P_MUL_ULOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umulx16.h b/vendor/riscv-isa-sim/riscv/insns/umulx16.h new file mode 100644 index 00000000..5abe9cf8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umulx16.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_ULOOP(16, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/umulx8.h b/vendor/riscv-isa-sim/riscv/insns/umulx8.h new file mode 100644 index 00000000..a2b073de --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/umulx8.h @@ -0,0 +1,3 @@ +P_MUL_CROSS_ULOOP(8, { + pd = ps1 * ps2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/unshfl.h b/vendor/riscv-isa-sim/riscv/insns/unshfl.h new file mode 100644 index 00000000..78990b87 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/unshfl.h @@ -0,0 +1,9 @@ +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & ((xlen-1) >> 1); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/unshfli.h b/vendor/riscv-isa-sim/riscv/insns/unshfli.h new file mode 100644 index 00000000..26920f14 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/unshfli.h @@ -0,0 +1,12 @@ +// Zbkb contains unzip but not general unshfli +require(((insn.rs2() == (xlen / 2 - 1)) && p->extension_enabled(EXT_ZBKB)) + || p->extension_enabled(EXT_XZBP)); +require(SHAMT < (xlen/2)); +reg_t x = RS1; +int shamt = SHAMT & ((xlen-1) >> 1); +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +if (shamt & 16) x = (x & 0xFFFF00000000FFFFLL) | ((x & 0x0000FFFF00000000LL) >> 16) | ((x & 0x00000000FFFF0000LL) << 16); +WRITE_RD(sext_xlen(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/unshflw.h b/vendor/riscv-isa-sim/riscv/insns/unshflw.h new file mode 100644 index 00000000..776534e7 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/unshflw.h @@ -0,0 +1,9 @@ +require_rv64; +require_extension(EXT_XZBP); +reg_t x = RS1; +int shamt = RS2 & 15; +if (shamt & 1) x = (x & 0x9999999999999999LL) | ((x & 0x4444444444444444LL) >> 1) | ((x & 0x2222222222222222LL) << 1); +if (shamt & 2) x = (x & 0xC3C3C3C3C3C3C3C3LL) | ((x & 0x3030303030303030LL) >> 2) | ((x & 0x0C0C0C0C0C0C0C0CLL) << 2); +if (shamt & 4) x = (x & 0xF00FF00FF00FF00FLL) | ((x & 0x0F000F000F000F00LL) >> 4) | ((x & 0x00F000F000F000F0LL) << 4); +if (shamt & 8) x = (x & 0xFF0000FFFF0000FFLL) | ((x & 0x00FF000000FF0000LL) >> 8) | ((x & 0x0000FF000000FF00LL) << 8); +WRITE_RD(sext32(x)); diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd16.h b/vendor/riscv-isa-sim/riscv/insns/uradd16.h new file mode 100644 index 00000000..29610bf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd32.h b/vendor/riscv-isa-sim/riscv/insns/uradd32.h new file mode 100644 index 00000000..4f791d91 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd64.h b/vendor/riscv-isa-sim/riscv/insns/uradd64.h new file mode 100644 index 00000000..f6787e16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd64.h @@ -0,0 +1,9 @@ +P_64_UPROFILE({ + rd = rs1 + rs2; + if (rd < rs1) { + rd >>= 1; + rd |= ((reg_t)1 << 63); + } else { + rd >>= 1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uradd8.h b/vendor/riscv-isa-sim/riscv/insns/uradd8.h new file mode 100644 index 00000000..412e3d59 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uradd8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/uraddw.h b/vendor/riscv-isa-sim/riscv/insns/uraddw.h new file mode 100644 index 00000000..6a9455fc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/uraddw.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +reg_t res = (reg_t)P_W(RS1, 0) + (reg_t)P_W(RS2, 0); +res >>= 1; +WRITE_RD(sext_xlen((int32_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/urcras16.h b/vendor/riscv-isa-sim/riscv/insns/urcras16.h new file mode 100644 index 00000000..2bac3156 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcras16.h @@ -0,0 +1,5 @@ +P_CROSS_ULOOP(16, { + pd = (ps1 + ps2) >> 1; +}, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urcras32.h b/vendor/riscv-isa-sim/riscv/insns/urcras32.h new file mode 100644 index 00000000..a08e8777 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcras32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_ULOOP(32, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urcrsa16.h b/vendor/riscv-isa-sim/riscv/insns/urcrsa16.h new file mode 100644 index 00000000..a890990a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcrsa16.h @@ -0,0 +1,5 @@ +P_CROSS_ULOOP(16, { + pd = (ps1 - ps2) >> 1; +}, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urcrsa32.h b/vendor/riscv-isa-sim/riscv/insns/urcrsa32.h new file mode 100644 index 00000000..7ddd4f95 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urcrsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_CROSS_ULOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstas16.h 
b/vendor/riscv-isa-sim/riscv/insns/urstas16.h new file mode 100644 index 00000000..3cc89d81 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstas16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_ULOOP(16, { + pd = (ps1 + ps2) >> 1; +}, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstas32.h b/vendor/riscv-isa-sim/riscv/insns/urstas32.h new file mode 100644 index 00000000..668fa66a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstas32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_ULOOP(32, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstsa16.h b/vendor/riscv-isa-sim/riscv/insns/urstsa16.h new file mode 100644 index 00000000..a88bcf13 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstsa16.h @@ -0,0 +1,5 @@ +P_STRAIGHT_ULOOP(16, { + pd = (ps1 - ps2) >> 1; +}, { + pd = (ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/urstsa32.h b/vendor/riscv-isa-sim/riscv/insns/urstsa32.h new file mode 100644 index 00000000..097337ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/urstsa32.h @@ -0,0 +1,6 @@ +require_rv64; +P_STRAIGHT_ULOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}, { + pd = ((uint64_t)ps1 + ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub16.h b/vendor/riscv-isa-sim/riscv/insns/ursub16.h new file mode 100644 index 00000000..bcd09f02 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub16.h @@ -0,0 +1,3 @@ +P_ULOOP(16, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub32.h b/vendor/riscv-isa-sim/riscv/insns/ursub32.h new file mode 100644 index 00000000..215436d0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub32.h @@ -0,0 +1,4 @@ +require_rv64; +P_ULOOP(32, { + pd = ((uint64_t)ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub64.h b/vendor/riscv-isa-sim/riscv/insns/ursub64.h new file mode 100644 index 00000000..3d845a0c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub64.h @@ -0,0 +1,9 @@ +P_64_UPROFILE({ + rd = rs1 - rs2; + if (rd > rs1) { + rd >>= 1; + rd |= ((reg_t)1 << 63); + } else { + rd >>= 1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursub8.h b/vendor/riscv-isa-sim/riscv/insns/ursub8.h new file mode 100644 index 00000000..d66a9957 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursub8.h @@ -0,0 +1,3 @@ +P_ULOOP(8, { + pd = (ps1 - ps2) >> 1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/ursubw.h b/vendor/riscv-isa-sim/riscv/insns/ursubw.h new file mode 100644 index 00000000..5c90fde4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/ursubw.h @@ -0,0 +1,4 @@ +require_extension(EXT_ZPN); +reg_t res = (reg_t)P_W(RS1, 0) - (reg_t)P_W(RS2, 0); +res >>= 1; +WRITE_RD(sext_xlen((int32_t)res)); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h new file mode 100644 index 00000000..0e7e39b4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vaadd_vv.h @@ -0,0 +1,2 @@ +// vaadd.vv vd, vs2, vs1 +VI_VV_LOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h new file mode 100644 index 00000000..120e63eb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vaadd_vx.h @@ -0,0 +1,2 @@ +// vaadd.vx vd, vs2, rs1 +VI_VX_LOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h new file mode 100644 index 00000000..7eb7a895 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vv.h @@ -0,0 +1,2 @@ +// vaaddu.vv vd, vs2, vs1 +VI_VV_ULOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h new file mode 100644 index 00000000..325206f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vaaddu_vx.h @@ -0,0 +1,2 @@ +// vaaddu.vx vd, vs2, rs1 +VI_VX_ULOOP_AVG(+); diff --git a/vendor/riscv-isa-sim/riscv/insns/vadc_vim.h b/vendor/riscv-isa-sim/riscv/insns/vadc_vim.h new file mode 100644 index 00000000..4b0356f2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadc_vim.h @@ -0,0 +1,5 @@ +// vadc.vim vd, vs2, simm5, v0 +VI_XI_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & simm5) + (op_mask & vs2) + carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h new file mode 100644 index 00000000..7b41dd9f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadc_vvm.h @@ -0,0 +1,5 @@ +// vadc.vvm vd, vs2, rs1, v0 +VI_VV_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & vs1) + (op_mask & vs2) + carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h new file mode 100644 index 00000000..b1f8886d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadc_vxm.h @@ -0,0 +1,5 @@ +// vadc.vxm vd, vs2, rs1, v0 +VI_XI_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & rs1) + (op_mask & vs2) + carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadd_vi.h b/vendor/riscv-isa-sim/riscv/insns/vadd_vi.h new file mode 100644 index 00000000..45fc6b74 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadd_vi.h @@ -0,0 +1,5 @@ +// vadd.vi vd, simm5, vs2, vm +VI_VI_LOOP +({ + vd = simm5 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vadd_vv.h new file mode 100644 index 00000000..45c6bdcb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadd_vv.h @@ -0,0 +1,5 @@ +// vadd.vv vd, vs1, vs2, vm +VI_VV_LOOP +({ + vd = vs1 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vadd_vx.h new file mode 100644 index 00000000..33e72ee4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vadd_vx.h @@ -0,0 +1,5 @@ +// vadd.vx vd, rs1, vs2, vm +VI_VX_LOOP +({ + vd = rs1 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h new file mode 100644 index 00000000..3cb3db70 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei16_v.h @@ -0,0 +1,2 @@ +//vamoadde.v vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h new file mode 100644 index 00000000..2bd77fcb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei32_v.h @@ -0,0 +1,2 @@ +//vamoadde.v vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h new file mode 100644 index 00000000..79ca7482 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei64_v.h @@ -0,0 +1,2 @@ +//vamoadde.v vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h new file mode 100644 index 00000000..06b8c793 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoaddei8_v.h @@ -0,0 +1,2 @@ +//vamoadde.v 
vd, (rs1), vs2, vd +VI_AMO({ return lhs + vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h new file mode 100644 index 00000000..be119497 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei16_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h new file mode 100644 index 00000000..71506704 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei32_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h new file mode 100644 index 00000000..3efae3b5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei64_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h new file mode 100644 index 00000000..c47645d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoandei8_v.h @@ -0,0 +1,2 @@ +//vamoande.v vd, (rs1), vs2, vd +VI_AMO({ return lhs & vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h new file mode 100644 index 00000000..ca67893e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei16_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h new file mode 100644 index 00000000..b6823cd0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei32_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h new file mode 100644 index 00000000..46e8a3bb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei64_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h new file mode 100644 index 00000000..9697b3a4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxei8_v.h @@ -0,0 +1,2 @@ +//vamomaxe.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3; }, int, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h new file mode 100644 index 00000000..e05971df --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei16_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3;; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h new file mode 100644 index 00000000..9b873543 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei32_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? 
lhs : vs3;; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h new file mode 100644 index 00000000..bbfbc9f2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei64_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3;; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h new file mode 100644 index 00000000..357ba245 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamomaxuei8_v.h @@ -0,0 +1,2 @@ +//vamomaxue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs >= vs3 ? lhs : vs3;; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h new file mode 100644 index 00000000..9d1ecac6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei16_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h new file mode 100644 index 00000000..6cb8475e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei32_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h new file mode 100644 index 00000000..9ef3d4ee --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei64_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h new file mode 100644 index 00000000..5c035ea4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominei8_v.h @@ -0,0 +1,2 @@ +//vamomine.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3; }, int, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h new file mode 100644 index 00000000..d4a8f892 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei16_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3;; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h new file mode 100644 index 00000000..16296c5b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei32_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3;; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h new file mode 100644 index 00000000..fd850fd0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei64_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? lhs : vs3;; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h new file mode 100644 index 00000000..3749d052 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamominuei8_v.h @@ -0,0 +1,2 @@ +//vamominue.v vd, (rs1), vs2, vd +VI_AMO({ return lhs < vs3 ? 
lhs : vs3;; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h new file mode 100644 index 00000000..a5ba1caa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei16_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h new file mode 100644 index 00000000..94e4458e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei32_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h new file mode 100644 index 00000000..84e03944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei64_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h new file mode 100644 index 00000000..364035db --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoorei8_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs | vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h new file mode 100644 index 00000000..31ff0210 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei16_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h new file mode 100644 index 00000000..a5741929 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei32_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h new file mode 100644 index 00000000..58bd0352 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei64_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h new file mode 100644 index 00000000..af37c8c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoswapei8_v.h @@ -0,0 +1,2 @@ +//vamoswape.v vd, (rs1), vs2, vd +VI_AMO({ return vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h new file mode 100644 index 00000000..61e8c327 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei16_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h new file mode 100644 index 00000000..d48d9515 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei32_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h new file mode 100644 index 00000000..f7a3ca42 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei64_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e64); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h new file mode 100644 index 00000000..4b6c7982 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vamoxorei8_v.h @@ -0,0 +1,2 @@ +//vamoore.v vd, (rs1), vs2, vd +VI_AMO({ return lhs ^ vs3; }, uint, e8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vand_vi.h b/vendor/riscv-isa-sim/riscv/insns/vand_vi.h new file mode 100644 index 00000000..dd9618ba --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vand_vi.h @@ -0,0 +1,5 @@ +// vand.vi vd, simm5, vs2, vm +VI_VI_LOOP +({ + vd = simm5 & vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vand_vv.h b/vendor/riscv-isa-sim/riscv/insns/vand_vv.h new file mode 100644 index 00000000..65558e4b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vand_vv.h @@ -0,0 +1,5 @@ +// vand.vv vd, vs1, vs2, vm +VI_VV_LOOP +({ + vd = vs1 & vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vand_vx.h b/vendor/riscv-isa-sim/riscv/insns/vand_vx.h new file mode 100644 index 00000000..8eea1ed5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vand_vx.h @@ -0,0 +1,5 @@ +// vand.vx vd, rs1, vs2, vm +VI_VX_LOOP +({ + vd = rs1 & vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vasub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vasub_vv.h new file mode 100644 index 00000000..7dfbdfcf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasub_vv.h @@ -0,0 +1,2 @@ +// vasub.vv vd, vs2, vs1 +VI_VV_LOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vasub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vasub_vx.h new file mode 100644 index 00000000..185fa9c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasub_vx.h @@ -0,0 +1,2 @@ +// vasub.vx vd, vs2, rs1 +VI_VX_LOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h new file mode 100644 index 00000000..902fef99 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasubu_vv.h @@ -0,0 +1,2 @@ +// vasubu.vv vd, vs2, vs1 +VI_VV_ULOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h new file mode 100644 index 00000000..874dc59e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vasubu_vx.h @@ -0,0 +1,2 @@ +// vasubu.vx vd, vs2, rs1 +VI_VX_ULOOP_AVG(-); diff --git a/vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h b/vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h new file mode 100644 index 00000000..71953459 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vcompress_vm.h @@ -0,0 +1,33 @@ +// vcompress vd, vs2, vs1 +require(P.VU.vstart->read() == 0); +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require(insn.rd() != insn.rs2()); +require_noover(insn.rd(), P.VU.vflmul, insn.rs1(), 1); + +reg_t pos = 0; + +VI_GENERAL_LOOP_BASE + const int midx = i / 64; + const int mpos = i % 64; + + bool do_mask = (P.VU.elt(rs1_num, midx) >> mpos) & 0x1; + if (do_mask) { + switch (sew) { + case e8: + P.VU.elt(rd_num, pos, true) = P.VU.elt(rs2_num, i); + break; + case e16: + P.VU.elt(rd_num, pos, true) = P.VU.elt(rs2_num, i); + break; + case e32: + P.VU.elt(rd_num, pos, true) = P.VU.elt(rs2_num, i); + break; + default: + P.VU.elt(rd_num, pos, true) = P.VU.elt(rs2_num, i); + break; + } + + ++pos; + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vcpop_m.h b/vendor/riscv-isa-sim/riscv/insns/vcpop_m.h new file mode 100644 index 00000000..cbe45a4b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vcpop_m.h @@ -0,0 +1,23 @@ +// vmpopc rd, vs2, vm 
+require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); +require(P.VU.vstart->read() == 0); +reg_t popcount = 0; +for (reg_t i=P.VU.vstart->read(); i < vl; ++i) { + const int midx = i / 64; + const int mpos = i % 64; + bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx ) >> mpos) & 0x1) == 1; + if (insn.v_vm() == 1) { + popcount += vs2_lsb; + } else { + bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1; + popcount += (vs2_lsb && do_mask); + } +} +P.VU.vstart->write(0); +WRITE_RD(popcount); diff --git a/vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h b/vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h new file mode 100644 index 00000000..0d4bd0d8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdiv_vv.h @@ -0,0 +1,10 @@ +// vdiv.vv vd, vs2, vs1 +VI_VV_LOOP +({ + if (vs1 == 0) + vd = -1; + else if (vs2 == (INT64_MIN >> (64 - sew)) && vs1 == -1) + vd = vs2; + else + vd = vs2 / vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h b/vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h new file mode 100644 index 00000000..40529527 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdiv_vx.h @@ -0,0 +1,10 @@ +// vdiv.vx vd, vs2, rs1 +VI_VX_LOOP +({ + if(rs1 == 0) + vd = -1; + else if(vs2 == (INT64_MIN >> (64 - sew)) && rs1 == -1) + vd = vs2; + else + vd = vs2 / rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h new file mode 100644 index 00000000..ef6e777d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdivu_vv.h @@ -0,0 +1,8 @@ +// vdivu.vv vd, vs2, vs1 +VI_VV_ULOOP +({ + if(vs1 == 0) + vd = -1; + else + vd = vs2 / vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h new file mode 100644 index 00000000..7ffe1c68 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vdivu_vx.h @@ -0,0 +1,8 @@ +// vdivu.vx vd, vs2, rs1 +VI_VX_ULOOP +({ + if(rs1 == 0) + vd = -1; + else + vd = vs2 / rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h new file mode 100644 index 00000000..2b808e0c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfadd_vf.h @@ -0,0 +1,11 @@ +// vfadd.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_add(rs1, vs2); +}, +{ + vd = f32_add(rs1, vs2); +}, +{ + vd = f64_add(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h new file mode 100644 index 00000000..ce94921d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfadd_vv.h @@ -0,0 +1,11 @@ +// vfadd.vv vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_add(vs1, vs2); +}, +{ + vd = f32_add(vs1, vs2); +}, +{ + vd = f64_add(vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfclass_v.h b/vendor/riscv-isa-sim/riscv/insns/vfclass_v.h new file mode 100644 index 00000000..a307d2d1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfclass_v.h @@ -0,0 +1,11 @@ +// vfclass.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16(f16_classify(vs2)); +}, +{ + vd = f32(f32_classify(vs2)); +}, +{ + vd = f64(f64_classify(vs2)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h new file mode 100644 index 00000000..d094c140 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_x_v.h @@ -0,0 +1,7 @@ +// vfcvt.f.x.v vd, vd2, vm +VI_VFP_CVT_INT_TO_FP( + { vd = i32_to_f16(vs2); }, // BODY16 + { vd = i32_to_f32(vs2); }, // BODY32 + { vd = i64_to_f64(vs2); }, // BODY64 + int // sign +) diff --git
a/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h new file mode 100644 index 00000000..64dbb1c1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_f_xu_v.h @@ -0,0 +1,7 @@ +// vfcvt.f.xu.v vd, vd2, vm +VI_VFP_CVT_INT_TO_FP( + { vd = ui32_to_f16(vs2); }, // BODY16 + { vd = ui32_to_f32(vs2); }, // BODY32 + { vd = ui64_to_f64(vs2); }, // BODY64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h new file mode 100644 index 00000000..ecdfa22d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_x_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.rtz.x.f.v vd, vd2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_i16(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_i32(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_i64(vs2, softfloat_round_minMag, true); }, // BODY64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h new file mode 100644 index 00000000..87585d24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_rtz_xu_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.rtz.xu.f.v vd, vd2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_ui16(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_ui64(vs2, softfloat_round_minMag, true); }, // BODY64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h new file mode 100644 index 00000000..4f21b52f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_x_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.x.f.v vd, vd2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_i16(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_i32(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_i64(vs2, softfloat_roundingMode, true); }, // BODY64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h new file mode 100644 index 00000000..ba50fff6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfcvt_xu_f_v.h @@ -0,0 +1,7 @@ +// vfcvt.xu.f.v vd, vd2, vm +VI_VFP_CVT_FP_TO_INT( + { vd = f16_to_ui16(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_ui64(vs2, softfloat_roundingMode, true); }, // BODY64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h new file mode 100644 index 00000000..a703ef02 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vf.h @@ -0,0 +1,11 @@ +// vfdiv.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_div(vs2, rs1); +}, +{ + vd = f32_div(vs2, rs1); +}, +{ + vd = f64_div(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h new file mode 100644 index 00000000..c66d7516 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfdiv_vv.h @@ -0,0 +1,11 @@ +// vfdiv.vv vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_div(vs2, vs1); +}, +{ + vd = f32_div(vs2, vs1); +}, +{ + vd = f64_div(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfirst_m.h b/vendor/riscv-isa-sim/riscv/insns/vfirst_m.h new file mode 100644 index 00000000..5b768ed4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfirst_m.h @@ -0,0 +1,20 @@ +// vmfirst rd, vs2 +require(P.VU.vsew >= e8 && P.VU.vsew <= 
e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); +require(P.VU.vstart->read() == 0); +reg_t pos = -1; +for (reg_t i=P.VU.vstart->read(); i < vl; ++i) { + VI_LOOP_ELEMENT_SKIP() + + bool vs2_lsb = ((P.VU.elt(rs2_num, midx ) >> mpos) & 0x1) == 1; + if (vs2_lsb) { + pos = i; + break; + } +} +P.VU.vstart->write(0); +WRITE_RD(pos); diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h new file mode 100644 index 00000000..61578d33 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vf.h @@ -0,0 +1,11 @@ +// vfmacc.vf vd, rs1, vs2, vm # vd[i] = +(vs2[i] * x[rs1]) + vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, vs2, vd); +}, +{ + vd = f32_mulAdd(rs1, vs2, vd); +}, +{ + vd = f64_mulAdd(rs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h new file mode 100644 index 00000000..499b1d4d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmacc_vv.h @@ -0,0 +1,11 @@ +// vfmacc.vv vd, rs1, vs2, vm # vd[i] = +(vs2[i] * vs1[i]) + vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vs1, vs2, vd); +}, +{ + vd = f32_mulAdd(vs1, vs2, vd); +}, +{ + vd = f64_mulAdd(vs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h new file mode 100644 index 00000000..2a014295 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vf.h @@ -0,0 +1,11 @@ +// vfmadd: vd[i] = +(vd[i] * f[rs1]) + vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(vd, rs1, vs2); +}, +{ + vd = f32_mulAdd(vd, rs1, vs2); +}, +{ + vd = f64_mulAdd(vd, rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h new file mode 100644 index 00000000..7ef734f8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmadd_vv.h @@ -0,0 +1,11 @@ +// vfmadd: vd[i] = +(vd[i] * vs1[i]) + vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vd, vs1, vs2); +}, +{ + vd = f32_mulAdd(vd, vs1, vs2); +}, +{ + vd = f64_mulAdd(vd, vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h new file mode 100644 index 00000000..c4b74cbd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmax_vf.h @@ -0,0 +1,11 @@ +// vfmax +VI_VFP_VF_LOOP +({ + vd = f16_max(vs2, rs1); +}, +{ + vd = f32_max(vs2, rs1); +}, +{ + vd = f64_max(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h new file mode 100644 index 00000000..6439c899 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmax_vv.h @@ -0,0 +1,11 @@ +// vfmax +VI_VFP_VV_LOOP +({ + vd = f16_max(vs2, vs1); +}, +{ + vd = f32_max(vs2, vs1); +}, +{ + vd = f64_max(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h b/vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h new file mode 100644 index 00000000..d82dfef5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmerge_vfm.h @@ -0,0 +1,4 @@ +// vfmerge_vf vd, vs2, vs1, vm +VI_VF_MERGE_LOOP({ + vd = use_first ? 
rs1 : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h new file mode 100644 index 00000000..1560cdf7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmin_vf.h @@ -0,0 +1,11 @@ +// vfmin vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_min(vs2, rs1); +}, +{ + vd = f32_min(vs2, rs1); +}, +{ + vd = f64_min(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h new file mode 100644 index 00000000..882a7740 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmin_vv.h @@ -0,0 +1,11 @@ +// vfmin vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_min(vs2, vs1); +}, +{ + vd = f32_min(vs2, vs1); +}, +{ + vd = f64_min(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h new file mode 100644 index 00000000..8af397b9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vf.h @@ -0,0 +1,11 @@ +// vfmsac: vd[i] = +(f[rs1] * vs2[i]) - vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, vs2, f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(rs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(rs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h new file mode 100644 index 00000000..3bb50e50 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsac_vv.h @@ -0,0 +1,11 @@ +// vfmsac: vd[i] = +(vs1[i] * vs2[i]) - vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vs1, vs2, f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(vs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h new file mode 100644 index 00000000..ab77b4c6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vf.h @@ -0,0 +1,11 @@ +// vfmsub: vd[i] = +(vd[i] * f[rs1]) - vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(vd, rs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(vd, rs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vd, rs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h new file mode 100644 index 00000000..3cac937f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmsub_vv.h @@ -0,0 +1,11 @@ +// vfmsub: vd[i] = +(vd[i] * vs1[i]) - vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(vd, vs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(vd, vs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vd, vs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h new file mode 100644 index 00000000..f5f63e49 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmul_vf.h @@ -0,0 +1,11 @@ +// vfmul.vf vd, vs2, rs1, vm +VI_VFP_VF_LOOP +({ + vd = f16_mul(vs2, rs1); +}, +{ + vd = f32_mul(vs2, rs1); +}, +{ + vd = f64_mul(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h new file mode 100644 index 00000000..7930fd03 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmul_vv.h @@ -0,0 +1,11 @@ +// vfmul.vv vd, vs1, vs2, vm +VI_VFP_VV_LOOP +({ + vd = f16_mul(vs1, vs2); +}, +{ + vd = f32_mul(vs1, vs2); +}, +{ + vd = f64_mul(vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h b/vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h new file mode 100644 index 00000000..81605eaf --- /dev/null 
+++ b/vendor/riscv-isa-sim/riscv/insns/vfmv_f_s.h @@ -0,0 +1,38 @@ +// vfmv_f_s: rd = vs2[0] (rs1=0) +require_vector(true); +require_fp; +require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || + (P.VU.vsew == e32 && p->extension_enabled('F')) || + (P.VU.vsew == e64 && p->extension_enabled('D'))); +require(STATE.frm->read() < 0x5); + +reg_t rs2_num = insn.rs2(); +uint64_t vs2_0 = 0; +const reg_t sew = P.VU.vsew; +switch(sew) { + case e16: + vs2_0 = P.VU.elt(rs2_num, 0); + break; + case e32: + vs2_0 = P.VU.elt(rs2_num, 0); + break; + case e64: + vs2_0 = P.VU.elt(rs2_num, 0); + break; + default: + require(0); + break; +} + +// nan_extened +if (FLEN > sew) { + vs2_0 = vs2_0 | (UINT64_MAX << sew); +} + +if (FLEN == 64) { + WRITE_FRD(f64(vs2_0)); +} else { + WRITE_FRD(f32(vs2_0)); +} + +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h b/vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h new file mode 100644 index 00000000..edc376e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmv_s_f.h @@ -0,0 +1,29 @@ +// vfmv_s_f: vd[0] = rs1 (vs2=0) +require_vector(true); +require_fp; +require((P.VU.vsew == e16 && p->extension_enabled(EXT_ZFH)) || + (P.VU.vsew == e32 && p->extension_enabled('F')) || + (P.VU.vsew == e64 && p->extension_enabled('D'))); +require(STATE.frm->read() < 0x5); + +reg_t vl = P.VU.vl->read(); + +if (vl > 0 && P.VU.vstart->read() < vl) { + reg_t rd_num = insn.rd(); + + switch(P.VU.vsew) { + case e16: + P.VU.elt(rd_num, 0, true) = f16(FRS1).v; + break; + case e32: + P.VU.elt(rd_num, 0, true) = f32(FRS1).v; + break; + case e64: + if (FLEN == 64) + P.VU.elt(rd_num, 0, true) = f64(FRS1).v; + else + P.VU.elt(rd_num, 0, true) = f32(FRS1).v; + break; + } +} +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h b/vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h new file mode 100644 index 00000000..50b7513c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfmv_v_f.h @@ -0,0 +1,4 @@ +// vfmv_vf vd, vs1 +VI_VF_MERGE_LOOP({ + vd = rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h new file mode 100644 index 00000000..f4996f5d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_f_w.h @@ -0,0 +1,9 @@ +// vfncvt.f.f.v vd, vs2, vm +VI_VFP_NCVT_FP_TO_FP( + {;}, // BODY16 + { vd = f32_to_f16(vs2); }, // BODY32 + { vd = f64_to_f32(vs2); }, // BODY64 + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('D'); } // CHECK64 +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h new file mode 100644 index 00000000..d587be26 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_x_w.h @@ -0,0 +1,10 @@ +// vfncvt.f.x.v vd, vs2, vm +VI_VFP_NCVT_INT_TO_FP( + {;}, // BODY16 + { vd = i32_to_f16(vs2); }, // BODY32 + { vd = i64_to_f32(vs2); }, // BODY64 + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('F'); }, // CHECK64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h new file mode 100644 index 00000000..5e0e34fa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_f_xu_w.h @@ -0,0 +1,10 @@ +// vfncvt.f.xu.v vd, vs2, vm +VI_VFP_NCVT_INT_TO_FP( + {;}, // BODY16 + { vd = ui32_to_f16(vs2); }, // BODY32 + { vd = ui64_to_f32(vs2); }, // BODY64 + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('F'); }, // CHECK64 + uint // 
sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h new file mode 100644 index 00000000..89bdc05f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rod_f_f_w.h @@ -0,0 +1,15 @@ +// vfncvt.rod.f.f.v vd, vs2, vm +VI_VFP_NCVT_FP_TO_FP( + {;}, // BODY16 + { // BODY32 + softfloat_roundingMode = softfloat_round_odd; + vd = f32_to_f16(vs2); + }, + { // BODY64 + softfloat_roundingMode = softfloat_round_odd; + vd = f64_to_f32(vs2); + }, + {;}, // CHECK16 + { require_extension(EXT_ZFH); }, // CHECK32 + { require_extension('F'); } // CHECK64 +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h new file mode 100644 index 00000000..23b4d5e2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_x_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.rtz.x.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_i8(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_i16(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_i32(vs2, softfloat_round_minMag, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h new file mode 100644 index 00000000..f55c680b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_rtz_xu_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.rtz.xu.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_ui8(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_ui16(vs2, softfloat_round_minMag, true); }, // BODY32 + { vd = f64_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h new file mode 100644 index 00000000..a7f3c334 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_x_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.x.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_i8(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_i16(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_i32(vs2, softfloat_roundingMode, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h b/vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h new file mode 100644 index 00000000..02046e8b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfncvt_xu_f_w.h @@ -0,0 +1,10 @@ +// vfncvt.xu.f.w vd, vs2, vm +VI_VFP_NCVT_FP_TO_INT( + { vd = f16_to_ui8(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_ui16(vs2, softfloat_roundingMode, true); }, // BODY32 + { vd = f64_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY64 + { require_extension(EXT_ZFH); }, // CHECK16 + { require(p->extension_enabled('F')); }, // CHECK32 + { require(p->extension_enabled('D')); }, // CHECK64 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h new file mode 100644 index 00000000..1b99302c --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vf.h @@ -0,0 +1,11 @@ +// vfnmacc: vd[i] = -(f[rs1] * vs2[i]) - vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, f16(vs2.v ^ F16_SIGN), f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(rs1, f32(vs2.v ^ F32_SIGN), f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(rs1, f64(vs2.v ^ F64_SIGN), f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h new file mode 100644 index 00000000..7200e063 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmacc_vv.h @@ -0,0 +1,11 @@ +// vfnmacc: vd[i] = -(vs1[i] * vs2[i]) - vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vs2.v ^ F16_SIGN), vs1, f16(vd.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(f32(vs2.v ^ F32_SIGN), vs1, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vs2.v ^ F64_SIGN), vs1, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h new file mode 100644 index 00000000..cb9c217f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vf.h @@ -0,0 +1,11 @@ +// vfnmadd: vd[i] = -(vd[i] * f[rs1]) - vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), rs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), rs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), rs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h new file mode 100644 index 00000000..7160ed7d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmadd_vv.h @@ -0,0 +1,11 @@ +// vfnmadd: vd[i] = -(vd[i] * vs1[i]) - vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), vs1, f16(vs2.v ^ F16_SIGN)); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, f32(vs2.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), vs1, f64(vs2.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h new file mode 100644 index 00000000..aa6baa30 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vf.h @@ -0,0 +1,11 @@ +// vfnmsac: vd[i] = -(f[rs1] * vs2[i]) + vd[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(rs1, f16(vs2.v ^ F16_SIGN), vd); +}, +{ + vd = f32_mulAdd(rs1, f32(vs2.v ^ F32_SIGN), vd); +}, +{ + vd = f64_mulAdd(rs1, f64(vs2.v ^ F64_SIGN), vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h new file mode 100644 index 00000000..47db61d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsac_vv.h @@ -0,0 +1,11 @@ +// vfnmsac.vv vd, vs1, vs2, vm # vd[i] = -(vs2[i] * vs1[i]) + vd[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vs1.v ^ F16_SIGN), vs2, vd); +}, +{ + vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, vd); +}, +{ + vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h new file mode 100644 index 00000000..43aa9e26 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vf.h @@ -0,0 +1,11 @@ +// vfnmsub: vd[i] = -(vd[i] * f[rs1]) + vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), rs1, vs2); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), rs1, vs2); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h new file mode 100644 index 00000000..2a45c8fc --- 
/dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfnmsub_vv.h @@ -0,0 +1,11 @@ +// vfnmsub: vd[i] = -(vd[i] * vs1[i]) + vs2[i] +VI_VFP_VV_LOOP +({ + vd = f16_mulAdd(f16(vd.v ^ F16_SIGN), vs1, vs2); +}, +{ + vd = f32_mulAdd(f32(vd.v ^ F32_SIGN), vs1, vs2); +}, +{ + vd = f64_mulAdd(f64(vd.v ^ F64_SIGN), vs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h new file mode 100644 index 00000000..b283343c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrdiv_vf.h @@ -0,0 +1,11 @@ +// vfrdiv.vf vd, vs2, rs1, vm # scalar-vector, vd[i] = f[rs1]/vs2[i] +VI_VFP_VF_LOOP +({ + vd = f16_div(rs1, vs2); +}, +{ + vd = f32_div(rs1, vs2); +}, +{ + vd = f64_div(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h b/vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h new file mode 100644 index 00000000..69c026b0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrec7_v.h @@ -0,0 +1,11 @@ +// vfclass.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16_recip7(vs2); +}, +{ + vd = f32_recip7(vs2); +}, +{ + vd = f64_recip7(vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h new file mode 100644 index 00000000..f19ec597 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredmax_vs.h @@ -0,0 +1,12 @@ +// vfredmax vd, vs2, vs1 +bool is_propagate = false; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_max(vd_0, vs2); +}, +{ + vd_0 = f32_max(vd_0, vs2); +}, +{ + vd_0 = f64_max(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h new file mode 100644 index 00000000..e3cf1513 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredmin_vs.h @@ -0,0 +1,12 @@ +// vfredmin vd, vs2, vs1 +bool is_propagate = false; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_min(vd_0, vs2); +}, +{ + vd_0 = f32_min(vd_0, vs2); +}, +{ + vd_0 = f64_min(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h new file mode 100644 index 00000000..2438a7ba --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredosum_vs.h @@ -0,0 +1,12 @@ +// vfredosum: vd[0] = sum( vs2[*] , vs1[0] ) +bool is_propagate = false; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_add(vd_0, vs2); +}, +{ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h new file mode 100644 index 00000000..bad7308e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfredusum_vs.h @@ -0,0 +1,12 @@ +// vfredsum: vd[0] = sum( vs2[*] , vs1[0] ) +bool is_propagate = true; +VI_VFP_VV_LOOP_REDUCTION +({ + vd_0 = f16_add(vd_0, vs2); +}, +{ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h b/vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h new file mode 100644 index 00000000..a0737641 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrsqrt7_v.h @@ -0,0 +1,11 @@ +// vfclass.v vd, vs2, vm +VI_VFP_V_LOOP +({ + vd = f16_rsqrte7(vs2); +}, +{ + vd = f32_rsqrte7(vs2); +}, +{ + vd = f64_rsqrte7(vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h new file mode 100644 index 00000000..7fb26a5b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfrsub_vf.h @@ -0,0 +1,11 @@ +// vfsub.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_sub(rs1, vs2); +}, +{ + vd = 
f32_sub(rs1, vs2); +}, +{ + vd = f64_sub(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h new file mode 100644 index 00000000..ce06185e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vf.h @@ -0,0 +1,11 @@ +// vfsgnj vd, vs2, vs1 +VI_VFP_VF_LOOP +({ + vd = fsgnj16(vs2.v, rs1.v, false, false); +}, +{ + vd = fsgnj32(vs2.v, rs1.v, false, false); +}, +{ + vd = fsgnj64(vs2.v, rs1.v, false, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h new file mode 100644 index 00000000..722cb29c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnj_vv.h @@ -0,0 +1,11 @@ +// vfsgnj +VI_VFP_VV_LOOP +({ + vd = fsgnj16(vs2.v, vs1.v, false, false); +}, +{ + vd = fsgnj32(vs2.v, vs1.v, false, false); +}, +{ + vd = fsgnj64(vs2.v, vs1.v, false, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h new file mode 100644 index 00000000..e4894124 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vf.h @@ -0,0 +1,11 @@ +// vfsgnn +VI_VFP_VF_LOOP +({ + vd = fsgnj16(vs2.v, rs1.v, true, false); +}, +{ + vd = fsgnj32(vs2.v, rs1.v, true, false); +}, +{ + vd = fsgnj64(vs2.v, rs1.v, true, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h new file mode 100644 index 00000000..1d91f691 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjn_vv.h @@ -0,0 +1,11 @@ +// vfsgnn +VI_VFP_VV_LOOP +({ + vd = fsgnj16(vs2.v, vs1.v, true, false); +}, +{ + vd = fsgnj32(vs2.v, vs1.v, true, false); +}, +{ + vd = fsgnj64(vs2.v, vs1.v, true, false); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h new file mode 100644 index 00000000..7be164c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vf.h @@ -0,0 +1,11 @@ +// vfsgnx +VI_VFP_VF_LOOP +({ + vd = fsgnj16(vs2.v, rs1.v, false, true); +}, +{ + vd = fsgnj32(vs2.v, rs1.v, false, true); +}, +{ + vd = fsgnj64(vs2.v, rs1.v, false, true); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h new file mode 100644 index 00000000..b04b8454 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsgnjx_vv.h @@ -0,0 +1,11 @@ +// vfsgnx +VI_VFP_VV_LOOP +({ + vd = fsgnj16(vs2.v, vs1.v, false, true); +}, +{ + vd = fsgnj32(vs2.v, vs1.v, false, true); +}, +{ + vd = fsgnj64(vs2.v, vs1.v, false, true); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h new file mode 100644 index 00000000..66eeaccb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfslide1down_vf.h @@ -0,0 +1,36 @@ +//vfslide1down.vf vd, vs2, rs1 +VI_CHECK_SLIDE(false); + +VI_VFP_LOOP_BASE +if (i != vl - 1) { + switch (P.VU.vsew) { + case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, 1); + vd = vs2; + } + break; + case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, 1); + vd = vs2; + } + break; + case e64: { + VI_XI_SLIDEDOWN_PARAMS(e64, 1); + vd = vs2; + } + break; + } +} else { + switch (P.VU.vsew) { + case e16: + P.VU.elt(rd_num, vl - 1, true) = f16(FRS1); + break; + case e32: + P.VU.elt(rd_num, vl - 1, true) = f32(FRS1); + break; + case e64: + P.VU.elt(rd_num, vl - 1, true) = f64(FRS1); + break; + } +} +VI_VFP_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h new file mode 100644 index 
00000000..b9c2817c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfslide1up_vf.h @@ -0,0 +1,36 @@ +//vfslide1up.vf vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +VI_VFP_LOOP_BASE +if (i != 0) { + switch (P.VU.vsew) { + case e16: { + VI_XI_SLIDEUP_PARAMS(e16, 1); + vd = vs2; + } + break; + case e32: { + VI_XI_SLIDEUP_PARAMS(e32, 1); + vd = vs2; + } + break; + case e64: { + VI_XI_SLIDEUP_PARAMS(e64, 1); + vd = vs2; + } + break; + } +} else { + switch (P.VU.vsew) { + case e16: + P.VU.elt(rd_num, 0, true) = f16(FRS1); + break; + case e32: + P.VU.elt(rd_num, 0, true) = f32(FRS1); + break; + case e64: + P.VU.elt(rd_num, 0, true) = f64(FRS1); + break; + } +} +VI_VFP_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h b/vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h new file mode 100644 index 00000000..86f0148d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsqrt_v.h @@ -0,0 +1,11 @@ +// vsqrt.v vd, vd2, vm +VI_VFP_V_LOOP +({ + vd = f16_sqrt(vs2); +}, +{ + vd = f32_sqrt(vs2); +}, +{ + vd = f64_sqrt(vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h new file mode 100644 index 00000000..fc6877ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsub_vf.h @@ -0,0 +1,11 @@ +// vfsub.vf vd, vs2, rs1 +VI_VFP_VF_LOOP +({ + vd = f16_sub(vs2, rs1); +}, +{ + vd = f32_sub(vs2, rs1); +}, +{ + vd = f64_sub(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h new file mode 100644 index 00000000..b0403f11 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfsub_vv.h @@ -0,0 +1,11 @@ +// vfsub.vv vd, vs2, vs1 +VI_VFP_VV_LOOP +({ + vd = f16_sub(vs2, vs1); +}, +{ + vd = f32_sub(vs2, vs1); +}, +{ + vd = f64_sub(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h new file mode 100644 index 00000000..b8249001 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vf.h @@ -0,0 +1,8 @@ +// vfwadd.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_add(vs2, rs1); +}, +{ + vd = f64_add(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h new file mode 100644 index 00000000..7255a50e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_vv.h @@ -0,0 +1,8 @@ +// vfwadd.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_add(vs2, vs1); +}, +{ + vd = f64_add(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h new file mode 100644 index 00000000..021b17f0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wf.h @@ -0,0 +1,8 @@ +// vfwadd.wf vd, vs2, vs1 +VI_VFP_WF_LOOP_WIDE +({ + vd = f32_add(vs2, rs1); +}, +{ + vd = f64_add(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h new file mode 100644 index 00000000..c1ed0389 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwadd_wv.h @@ -0,0 +1,8 @@ +// vfwadd.wv vd, vs2, vs1 +VI_VFP_WV_LOOP_WIDE +({ + vd = f32_add(vs2, vs1); +}, +{ + vd = f64_add(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h new file mode 100644 index 00000000..0700070a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_f_v.h @@ -0,0 +1,9 @@ +// vfwcvt.f.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_FP( + {;}, // BODY8 + { vd = f16_to_f32(vs2); }, // BODY16 + { vd = f32_to_f64(vs2); }, // BODY32 + 
{;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('D'); } // CHECK32 +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h new file mode 100644 index 00000000..f51e8e3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_x_v.h @@ -0,0 +1,10 @@ +// vfwcvt.f.x.v vd, vs2, vm +VI_VFP_WCVT_INT_TO_FP( + { vd = i32_to_f16(vs2); }, // BODY8 + { vd = i32_to_f32(vs2); }, // BODY16 + { vd = i32_to_f64(vs2); }, // BODY32 + { require(p->extension_enabled(EXT_ZFH)); }, // CHECK8 + { require_extension('F'); }, // CHECK16 + { require_extension('D'); }, // CHECK32 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h new file mode 100644 index 00000000..7dd49721 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_f_xu_v.h @@ -0,0 +1,10 @@ +// vfwcvt.f.xu.v vd, vs2, vm +VI_VFP_WCVT_INT_TO_FP( + { vd = ui32_to_f16(vs2); }, // BODY8 + { vd = ui32_to_f32(vs2); }, // BODY16 + { vd = ui32_to_f64(vs2); }, // BODY32 + { require(p->extension_enabled(EXT_ZFH)); }, // CHECK8 + { require_extension('F'); }, // CHECK16 + { require_extension('D'); }, // CHECK32 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h new file mode 100644 index 00000000..74e5b9a0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_x_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.rtz.x.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_i32(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_i64(vs2, softfloat_round_minMag, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // CHECK32 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h new file mode 100644 index 00000000..72b8c6ee --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_rtz_xu_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.rtz,xu.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_ui32(vs2, softfloat_round_minMag, true); }, // BODY16 + { vd = f32_to_ui64(vs2, softfloat_round_minMag, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // CHECK32 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h new file mode 100644 index 00000000..74497f4a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_x_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.x.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_i32(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_i64(vs2, softfloat_roundingMode, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // CHECK32 + int // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h new file mode 100644 index 00000000..ad96c9c3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwcvt_xu_f_v.h @@ -0,0 +1,10 @@ +// vfwcvt.xu.f.v vd, vs2, vm +VI_VFP_WCVT_FP_TO_INT( + {;}, // BODY8 + { vd = f16_to_ui32(vs2, softfloat_roundingMode, true); }, // BODY16 + { vd = f32_to_ui64(vs2, softfloat_roundingMode, true); }, // BODY32 + {;}, // CHECK8 + { require_extension(EXT_ZFH); }, // CHECK16 + { require_extension('F'); }, // 
CHECK32 + uint // sign +) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h new file mode 100644 index 00000000..441fa0a7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vf.h @@ -0,0 +1,8 @@ +// vfwmacc.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(rs1, vs2, vd); +}, +{ + vd = f64_mulAdd(rs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h new file mode 100644 index 00000000..a654198b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmacc_vv.h @@ -0,0 +1,8 @@ +// vfwmacc.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(vs1, vs2, vd); +}, +{ + vd = f64_mulAdd(vs1, vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h new file mode 100644 index 00000000..18010ff4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vf.h @@ -0,0 +1,8 @@ +// vfwmsac.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(rs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(rs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h new file mode 100644 index 00000000..9dc4073f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmsac_vv.h @@ -0,0 +1,8 @@ +// vfwmsac.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(vs1, vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(vs1, vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h new file mode 100644 index 00000000..2bb543f6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vf.h @@ -0,0 +1,8 @@ +// vfwmul.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mul(vs2, rs1); +}, +{ + vd = f64_mul(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h new file mode 100644 index 00000000..2ce38e62 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwmul_vv.h @@ -0,0 +1,8 @@ +// vfwmul.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mul(vs2, vs1); +}, +{ + vd = f64_mul(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h new file mode 100644 index 00000000..038bda08 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vf.h @@ -0,0 +1,8 @@ +// vfwnmacc.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(f32(rs1.v ^ F32_SIGN), vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(rs1.v ^ F64_SIGN), vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h new file mode 100644 index 00000000..bf863e04 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmacc_vv.h @@ -0,0 +1,8 @@ +// vfwnmacc.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, f32(vd.v ^ F32_SIGN)); +}, +{ + vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, f64(vd.v ^ F64_SIGN)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h new file mode 100644 index 00000000..1e288e1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vf.h @@ -0,0 +1,8 @@ +// vfwnmacc.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_mulAdd(f32(rs1.v ^ F32_SIGN), vs2, vd); +}, +{ + vd = f64_mulAdd(f64(rs1.v ^ F64_SIGN), vs2, vd); +}) diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h new file mode 100644 index 00000000..ce97749e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwnmsac_vv.h @@ -0,0 +1,8 @@ +// vfwnmsac.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_mulAdd(f32(vs1.v ^ F32_SIGN), vs2, vd); +}, +{ + vd = f64_mulAdd(f64(vs1.v ^ F64_SIGN), vs2, vd); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h new file mode 100644 index 00000000..1f42d8ff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwredosum_vs.h @@ -0,0 +1,9 @@ +// vfwredosum.vs vd, vs2, vs1 +bool is_propagate = false; +VI_VFP_VV_LOOP_WIDE_REDUCTION +({ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h new file mode 100644 index 00000000..4ef28969 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwredusum_vs.h @@ -0,0 +1,9 @@ +// vfwredsum.vs vd, vs2, vs1 +bool is_propagate = true; +VI_VFP_VV_LOOP_WIDE_REDUCTION +({ + vd_0 = f32_add(vd_0, vs2); +}, +{ + vd_0 = f64_add(vd_0, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h new file mode 100644 index 00000000..8c376884 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vf.h @@ -0,0 +1,8 @@ +// vfwsub.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_WIDE +({ + vd = f32_sub(vs2, rs1); +}, +{ + vd = f64_sub(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h new file mode 100644 index 00000000..ce08e36a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_vv.h @@ -0,0 +1,8 @@ +// vfwsub.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_WIDE +({ + vd = f32_sub(vs2, vs1); +}, +{ + vd = f64_sub(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h new file mode 100644 index 00000000..f6f47ca5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wf.h @@ -0,0 +1,8 @@ +// vfwsub.wf vd, vs2, rs1 +VI_VFP_WF_LOOP_WIDE +({ + vd = f32_sub(vs2, rs1); +}, +{ + vd = f64_sub(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h new file mode 100644 index 00000000..eef904dc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vfwsub_wv.h @@ -0,0 +1,8 @@ +// vfwsub.wv vd, vs2, vs1 +VI_VFP_WV_LOOP_WIDE +({ + vd = f32_sub(vs2, vs1); +}, +{ + vd = f64_sub(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vid_v.h b/vendor/riscv-isa-sim/riscv/insns/vid_v.h new file mode 100644 index 00000000..c3162915 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vid_v.h @@ -0,0 +1,31 @@ +// vmpopc rd, vs2, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs1_num = insn.rs1(); +reg_t rs2_num = insn.rs2(); +require_align(rd_num, P.VU.vflmul); +require_vm; + +for (reg_t i = P.VU.vstart->read() ; i < P.VU.vl->read(); ++i) { + VI_LOOP_ELEMENT_SKIP(); + + switch (sew) { + case e8: + P.VU.elt<uint8_t>(rd_num, i, true) = i; + break; + case e16: + P.VU.elt<uint16_t>(rd_num, i, true) = i; + break; + case e32: + P.VU.elt<uint32_t>(rd_num, i, true) = i; + break; + default: + P.VU.elt<uint64_t>(rd_num, i, true) = i; + break; + } +} + +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/viota_m.h
b/vendor/riscv-isa-sim/riscv/insns/viota_m.h new file mode 100644 index 00000000..f74f2c24 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/viota_m.h @@ -0,0 +1,53 @@ +// vmpopc rd, vs2, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +reg_t vl = P.VU.vl->read(); +reg_t sew = P.VU.vsew; +reg_t rd_num = insn.rd(); +reg_t rs1_num = insn.rs1(); +reg_t rs2_num = insn.rs2(); +require(P.VU.vstart->read() == 0); +require_vm; +require_align(rd_num, P.VU.vflmul); +require_noover(rd_num, P.VU.vflmul, rs2_num, 1); + +int cnt = 0; +for (reg_t i = 0; i < vl; ++i) { + const int midx = i / 64; + const int mpos = i % 64; + + bool vs2_lsb = ((P.VU.elt<uint64_t>(rs2_num, midx) >> mpos) & 0x1) == 1; + bool do_mask = (P.VU.elt<uint64_t>(0, midx) >> mpos) & 0x1; + + bool has_one = false; + if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) { + if (vs2_lsb) { + has_one = true; + } + } + + bool use_ori = (insn.v_vm() == 0) && !do_mask; + switch (sew) { + case e8: + P.VU.elt<uint8_t>(rd_num, i, true) = use_ori ? + P.VU.elt<uint8_t>(rd_num, i) : cnt; + break; + case e16: + P.VU.elt<uint16_t>(rd_num, i, true) = use_ori ? + P.VU.elt<uint16_t>(rd_num, i) : cnt; + break; + case e32: + P.VU.elt<uint32_t>(rd_num, i, true) = use_ori ? + P.VU.elt<uint32_t>(rd_num, i) : cnt; + break; + default: + P.VU.elt<uint64_t>(rd_num, i, true) = use_ori ? + P.VU.elt<uint64_t>(rd_num, i) : cnt; + break; + } + + if (has_one) { + cnt++; + } +} + diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h new file mode 100644 index 00000000..220e83e6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl1re16_v.h @@ -0,0 +1,2 @@ +// vl1re16.v vd, (rs1) +VI_LD_WHOLE(uint16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h new file mode 100644 index 00000000..e72ca02a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl1re32_v.h @@ -0,0 +1,2 @@ +// vl1re32.v vd, (rs1) +VI_LD_WHOLE(uint32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h new file mode 100644 index 00000000..265701a0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl1re64_v.h @@ -0,0 +1,2 @@ +// vl1re64.v vd, (rs1) +VI_LD_WHOLE(uint64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h new file mode 100644 index 00000000..b4ce6616 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl1re8_v.h @@ -0,0 +1,2 @@ +// vl1re8.v vd, (rs1) +VI_LD_WHOLE(uint8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h new file mode 100644 index 00000000..2846edd9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl2re16_v.h @@ -0,0 +1,2 @@ +// vl2re16.v vd, (rs1) +VI_LD_WHOLE(uint16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h new file mode 100644 index 00000000..5cea8355 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl2re32_v.h @@ -0,0 +1,2 @@ +// vl2re32.v vd, (rs1) +VI_LD_WHOLE(uint32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h new file mode 100644 index 00000000..efdf2ce2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl2re64_v.h @@ -0,0 +1,2 @@ +// vl2re64.v vd, (rs1) +VI_LD_WHOLE(uint64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h new file mode 100644 index 00000000..fcc3c4c0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl2re8_v.h @@ -0,0 +1,2 @@ +// vl2re8.v vd, (rs1)
+VI_LD_WHOLE(uint8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h new file mode 100644 index 00000000..03634183 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl4re16_v.h @@ -0,0 +1,2 @@ +// vl4re16.v vd, (rs1) +VI_LD_WHOLE(uint16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h new file mode 100644 index 00000000..e37cc1ab --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl4re32_v.h @@ -0,0 +1,2 @@ +// vl4re32.v vd, (rs1) +VI_LD_WHOLE(uint32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h new file mode 100644 index 00000000..11486f5d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl4re64_v.h @@ -0,0 +1,2 @@ +// vl4re64.v vd, (rs1) +VI_LD_WHOLE(uint64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h new file mode 100644 index 00000000..f9ce3ff7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl4re8_v.h @@ -0,0 +1,2 @@ +// vl4re8.v vd, (rs1) +VI_LD_WHOLE(uint8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h new file mode 100644 index 00000000..0b3f1413 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl8re16_v.h @@ -0,0 +1,2 @@ +// vl8re16.v vd, (rs1) +VI_LD_WHOLE(uint16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h new file mode 100644 index 00000000..3372b89d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl8re32_v.h @@ -0,0 +1,2 @@ +// vl8re32.v vd, (rs1) +VI_LD_WHOLE(uint32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h new file mode 100644 index 00000000..f9a9ca98 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl8re64_v.h @@ -0,0 +1,2 @@ +// vl8re64.v vd, (rs1) +VI_LD_WHOLE(uint64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h b/vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h new file mode 100644 index 00000000..ee05e81a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vl8re8_v.h @@ -0,0 +1,2 @@ +// vl8re8.v vd, (rs1) +VI_LD_WHOLE(uint8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle16_v.h b/vendor/riscv-isa-sim/riscv/insns/vle16_v.h new file mode 100644 index 00000000..70bf39fb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle16_v.h @@ -0,0 +1,2 @@ +// vle16.v and vlseg[2-8]e16.v +VI_LD(0, (i * nf + fn), int16, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h new file mode 100644 index 00000000..53c88891 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle16ff_v.h @@ -0,0 +1,2 @@ +// vle16ff.v and vlseg[2-8]e16ff.v +VI_LDST_FF(int16); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle32_v.h b/vendor/riscv-isa-sim/riscv/insns/vle32_v.h new file mode 100644 index 00000000..f1d0e73c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle32_v.h @@ -0,0 +1,2 @@ +// vle32.v and vlseg[2-8]e32.v +VI_LD(0, (i * nf + fn), int32, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h new file mode 100644 index 00000000..7d03d7dd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle32ff_v.h @@ -0,0 +1,2 @@ +// vle32ff.v and vlseg[2-8]e32ff.v +VI_LDST_FF(int32); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle64_v.h b/vendor/riscv-isa-sim/riscv/insns/vle64_v.h new file mode 100644 index 00000000..86deb5cb 
--- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle64_v.h @@ -0,0 +1,2 @@ +// vle64.v and vlseg[2-8]e64.v +VI_LD(0, (i * nf + fn), int64, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h new file mode 100644 index 00000000..39996da6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle64ff_v.h @@ -0,0 +1,2 @@ +// vle64ff.v and vlseg[2-8]e64ff.v +VI_LDST_FF(int64); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle8_v.h b/vendor/riscv-isa-sim/riscv/insns/vle8_v.h new file mode 100644 index 00000000..ffe17c3a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle8_v.h @@ -0,0 +1,2 @@ +// vle8.v and vlseg[2-8]e8.v +VI_LD(0, (i * nf + fn), int8, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h b/vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h new file mode 100644 index 00000000..b56d1d33 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vle8ff_v.h @@ -0,0 +1,2 @@ +// vle8ff.v and vlseg[2-8]e8ff.v +VI_LDST_FF(int8); diff --git a/vendor/riscv-isa-sim/riscv/insns/vlm_v.h b/vendor/riscv-isa-sim/riscv/insns/vlm_v.h new file mode 100644 index 00000000..6d3f83aa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vlm_v.h @@ -0,0 +1,2 @@ +// vle1.v and vlseg[2-8]e8.v +VI_LD(0, (i * nf + fn), int8, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h new file mode 100644 index 00000000..6e4ed49b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vloxei16_v.h @@ -0,0 +1,2 @@ +// vlxei16.v and vlxseg[2-8]e16.v +VI_LD_INDEX(e16, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h new file mode 100644 index 00000000..a7da8ff0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vloxei32_v.h @@ -0,0 +1,2 @@ +// vlxe32.v and vlxseg[2-8]ei32.v +VI_LD_INDEX(e32, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h new file mode 100644 index 00000000..067224e4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vloxei64_v.h @@ -0,0 +1,3 @@ +// vlxei64.v and vlxseg[2-8]ei64.v +VI_LD_INDEX(e64, true); + diff --git a/vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h new file mode 100644 index 00000000..d2730499 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vloxei8_v.h @@ -0,0 +1,2 @@ +// vlxei8.v and vlxseg[2-8]ei8.v +VI_LD_INDEX(e8, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse16_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse16_v.h new file mode 100644 index 00000000..5ac23a98 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vlse16_v.h @@ -0,0 +1,2 @@ +// vlse16.v and vlsseg[2-8]e16.v +VI_LD(i * RS2, fn, int16, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse32_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse32_v.h new file mode 100644 index 00000000..cfd74fb9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vlse32_v.h @@ -0,0 +1,2 @@ +// vlse32.v and vlsseg[2-8]e32.v +VI_LD(i * RS2, fn, int32, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse64_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse64_v.h new file mode 100644 index 00000000..2e339638 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vlse64_v.h @@ -0,0 +1,2 @@ +// vlse64.v and vlsseg[2-8]e64.v +VI_LD(i * RS2, fn, int64, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vlse8_v.h b/vendor/riscv-isa-sim/riscv/insns/vlse8_v.h new file mode 100644 index 00000000..275f0224 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vlse8_v.h @@ -0,0 +1,2 @@ +// vlse8.v and vlsseg[2-8]e8.v +VI_LD(i * RS2, fn, int8, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h new file mode 100644 index 00000000..6e4ed49b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vluxei16_v.h @@ -0,0 +1,2 @@ +// vlxei16.v and vlxseg[2-8]e16.v +VI_LD_INDEX(e16, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h new file mode 100644 index 00000000..a7da8ff0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vluxei32_v.h @@ -0,0 +1,2 @@ +// vlxe32.v and vlxseg[2-8]ei32.v +VI_LD_INDEX(e32, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h new file mode 100644 index 00000000..067224e4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vluxei64_v.h @@ -0,0 +1,3 @@ +// vlxei64.v and vlxseg[2-8]ei64.v +VI_LD_INDEX(e64, true); + diff --git a/vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h new file mode 100644 index 00000000..d2730499 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vluxei8_v.h @@ -0,0 +1,2 @@ +// vlxei8.v and vlxseg[2-8]ei8.v +VI_LD_INDEX(e8, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h new file mode 100644 index 00000000..e6ec93ff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmacc_vv.h @@ -0,0 +1,5 @@ +// vmacc.vv: vd[i] = +(vs1[i] * vs2[i]) + vd[i] +VI_VV_LOOP +({ + vd = vs1 * vs2 + vd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h new file mode 100644 index 00000000..d40b264a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmacc_vx.h @@ -0,0 +1,5 @@ +// vmacc.vx: vd[i] = +(x[rs1] * vs2[i]) + vd[i] +VI_VX_LOOP +({ + vd = rs1 * vs2 + vd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h new file mode 100644 index 00000000..37da8adf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vi.h @@ -0,0 +1,2 @@ +// vmadc.vi vd, vs2, simm5 +#include "vmadc_vim.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h new file mode 100644 index 00000000..a1f78fff --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vim.h @@ -0,0 +1,5 @@ +// vmadc.vim vd, vs2, simm5, v0 +VI_XI_LOOP_CARRY +({ + res = (((op_mask & simm5) + (op_mask & vs2) + carry) >> sew) & 0x1u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h new file mode 100644 index 00000000..e120fe63 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vv.h @@ -0,0 +1,2 @@ +// vmadc.vvm vd, vs2, rs1 +#include "vmadc_vvm.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h new file mode 100644 index 00000000..96a7f2cb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vvm.h @@ -0,0 +1,5 @@ +// vmadc.vvm vd, vs2, rs1, v0 +VI_VV_LOOP_CARRY +({ + res = (((op_mask & vs1) + (op_mask & vs2) + carry) >> sew) & 0x1u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h new file mode 100644 index 00000000..39c20b1e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vx.h @@ -0,0 +1,2 @@ +// vadc.vx vd, vs2, rs1 +#include "vmadc_vxm.h" diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h new file mode 100644 index 00000000..1561d858 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadc_vxm.h @@ -0,0 +1,5 @@ +// vadc.vx vd, vs2, rs1, v0 +VI_XI_LOOP_CARRY +({ + res = (((op_mask & rs1) + (op_mask & vs2) + carry) >> sew) & 0x1u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h new file mode 100644 index 00000000..a1c0d2ed --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadd_vv.h @@ -0,0 +1,5 @@ +// vmadd: vd[i] = (vd[i] * vs1[i]) + vs2[i] +VI_VV_LOOP +({ + vd = vd * vs1 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h new file mode 100644 index 00000000..1a8a0015 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmadd_vx.h @@ -0,0 +1,5 @@ +// vmadd: vd[i] = (vd[i] * x[rs1]) + vs2[i] +VI_VX_LOOP +({ + vd = vd * rs1 + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmand_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmand_mm.h new file mode 100644 index 00000000..04615c60 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmand_mm.h @@ -0,0 +1,2 @@ +// vmand.mm vd, vs2, vs1 +VI_LOOP_MASK(vs2 & vs1); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h new file mode 100644 index 00000000..e9a87cf4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmandn_mm.h @@ -0,0 +1,2 @@ +// vmandn.mm vd, vs2, vs1 +VI_LOOP_MASK(vs2 & ~vs1); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmax_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmax_vv.h new file mode 100644 index 00000000..b9f15c5f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmax_vv.h @@ -0,0 +1,10 @@ +// vmax.vv vd, vs2, vs1, vm # Vector-vector +VI_VV_LOOP +({ + if (vs1 >= vs2) { + vd = vs1; + } else { + vd = vs2; + } + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmax_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmax_vx.h new file mode 100644 index 00000000..06f3f431 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmax_vx.h @@ -0,0 +1,10 @@ +// vmax.vx vd, vs2, rs1, vm # vector-scalar +VI_VX_LOOP +({ + if (rs1 >= vs2) { + vd = rs1; + } else { + vd = vs2; + } + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h new file mode 100644 index 00000000..4e6868d1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vv.h @@ -0,0 +1,9 @@ +// vmaxu.vv vd, vs2, vs1, vm # Vector-vector +VI_VV_ULOOP +({ + if (vs1 >= vs2) { + vd = vs1; + } else { + vd = vs2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h new file mode 100644 index 00000000..cab89188 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmaxu_vx.h @@ -0,0 +1,9 @@ +// vmaxu.vx vd, vs2, rs1, vm # vector-scalar +VI_VX_ULOOP +({ + if (rs1 >= vs2) { + vd = rs1; + } else { + vd = vs2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h b/vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h new file mode 100644 index 00000000..0b2fac98 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmerge_vim.h @@ -0,0 +1,5 @@ +// vmerge.vim vd, vs2, simm5 +VI_VI_MERGE_LOOP +({ + vd = use_first ? 
simm5 : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h new file mode 100644 index 00000000..b60c1526 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmerge_vvm.h @@ -0,0 +1,5 @@ +// vmerge.vvm vd, vs2, vs1 +VI_VV_MERGE_LOOP +({ + vd = use_first ? vs1 : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h new file mode 100644 index 00000000..a22da8a1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmerge_vxm.h @@ -0,0 +1,5 @@ +// vmerge.vxm vd, vs2, rs1 +VI_VX_MERGE_LOOP +({ + vd = use_first ? rs1 : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h new file mode 100644 index 00000000..a4d7c50c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vf.h @@ -0,0 +1,11 @@ +// vmfeq.vf vd, vs2, fs1 +VI_VFP_VF_LOOP_CMP +({ + res = f16_eq(vs2, rs1); +}, +{ + res = f32_eq(vs2, rs1); +}, +{ + res = f64_eq(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h new file mode 100644 index 00000000..b08ce980 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfeq_vv.h @@ -0,0 +1,11 @@ +// vmfeq.vv vd, vs2, vs1 +VI_VFP_VV_LOOP_CMP +({ + res = f16_eq(vs2, vs1); +}, +{ + res = f32_eq(vs2, vs1); +}, +{ + res = f64_eq(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h new file mode 100644 index 00000000..ab4df5ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfge_vf.h @@ -0,0 +1,11 @@ +// vmfge.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_CMP +({ + res = f16_le(rs1, vs2); +}, +{ + res = f32_le(rs1, vs2); +}, +{ + res = f64_le(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h new file mode 100644 index 00000000..dcc3ea37 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfgt_vf.h @@ -0,0 +1,11 @@ +// vmfgt.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_CMP +({ + res = f16_lt(rs1, vs2); +}, +{ + res = f32_lt(rs1, vs2); +}, +{ + res = f64_lt(rs1, vs2); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h new file mode 100644 index 00000000..a942705d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfle_vf.h @@ -0,0 +1,11 @@ +// vmfle.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_CMP +({ + res = f16_le(vs2, rs1); +}, +{ + res = f32_le(vs2, rs1); +}, +{ + res = f64_le(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h new file mode 100644 index 00000000..dd6f81da --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfle_vv.h @@ -0,0 +1,11 @@ +// vmfle.vv vd, vs2, rs1 +VI_VFP_VV_LOOP_CMP +({ + res = f16_le(vs2, vs1); +}, +{ + res = f32_le(vs2, vs1); +}, +{ + res = f64_le(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h new file mode 100644 index 00000000..110dbd1b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmflt_vf.h @@ -0,0 +1,11 @@ +// vmflt.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_CMP +({ + res = f16_lt(vs2, rs1); +}, +{ + res = f32_lt(vs2, rs1); +}, +{ + res = f64_lt(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h new file mode 100644 index 00000000..35f8d702 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmflt_vv.h @@ -0,0 +1,11 @@ +// vmflt.vv vd, vs2, vs1 
+VI_VFP_VV_LOOP_CMP +({ + res = f16_lt(vs2, vs1); +}, +{ + res = f32_lt(vs2, vs1); +}, +{ + res = f64_lt(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h b/vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h new file mode 100644 index 00000000..1b61d571 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfne_vf.h @@ -0,0 +1,11 @@ +// vmfne.vf vd, vs2, rs1 +VI_VFP_VF_LOOP_CMP +({ + res = !f16_eq(vs2, rs1); +}, +{ + res = !f32_eq(vs2, rs1); +}, +{ + res = !f64_eq(vs2, rs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h new file mode 100644 index 00000000..4447c3cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmfne_vv.h @@ -0,0 +1,11 @@ +// vmfne.vv vd, vs2, rs1 +VI_VFP_VV_LOOP_CMP +({ + res = !f16_eq(vs2, vs1); +}, +{ + res = !f32_eq(vs2, vs1); +}, +{ + res = !f64_eq(vs2, vs1); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmin_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmin_vv.h new file mode 100644 index 00000000..21da0b3c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmin_vv.h @@ -0,0 +1,11 @@ +// vmin.vv vd, vs2, vs1, vm # Vector-vector +VI_VV_LOOP +({ + if (vs1 <= vs2) { + vd = vs1; + } else { + vd = vs2; + } + + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmin_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmin_vx.h new file mode 100644 index 00000000..3291776d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmin_vx.h @@ -0,0 +1,11 @@ +// vminx.vx vd, vs2, rs1, vm # vector-scalar +VI_VX_LOOP +({ + if (rs1 <= vs2) { + vd = rs1; + } else { + vd = vs2; + } + + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vminu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vminu_vv.h new file mode 100644 index 00000000..c0ab1958 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vminu_vv.h @@ -0,0 +1,9 @@ +// vminu.vv vd, vs2, vs1, vm # Vector-vector +VI_VV_ULOOP +({ + if (vs1 <= vs2) { + vd = vs1; + } else { + vd = vs2; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vminu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vminu_vx.h new file mode 100644 index 00000000..1055895a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vminu_vx.h @@ -0,0 +1,10 @@ +// vminu.vx vd, vs2, rs1, vm # vector-scalar +VI_VX_ULOOP +({ + if (rs1 <= vs2) { + vd = rs1; + } else { + vd = vs2; + } + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h new file mode 100644 index 00000000..5a3ab090 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmnand_mm.h @@ -0,0 +1,2 @@ +// vmnand.mm vd, vs2, vs1 +VI_LOOP_MASK(~(vs2 & vs1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h new file mode 100644 index 00000000..ab933786 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmnor_mm.h @@ -0,0 +1,2 @@ +// vmnor.mm vd, vs2, vs1 +VI_LOOP_MASK(~(vs2 | vs1)); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmor_mm.h new file mode 100644 index 00000000..32e71b93 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmor_mm.h @@ -0,0 +1,2 @@ +// vmor.mm vd, vs2, vs1 +VI_LOOP_MASK(vs2 | vs1); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h new file mode 100644 index 00000000..23026f5c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmorn_mm.h @@ -0,0 +1,2 @@ +// vmorn.mm vd, vs2, vs1 +VI_LOOP_MASK(vs2 | ~vs1); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h new file mode 
100644 index 00000000..a7bbba18 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vv.h @@ -0,0 +1,2 @@ +// vmsbc.vv vd, vs2, rs1 +#include "vmsbc_vvm.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h new file mode 100644 index 00000000..3225c62d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vvm.h @@ -0,0 +1,5 @@ +// vmsbc.vvm vd, vs2, rs1, v0 +VI_VV_LOOP_CARRY +({ + res = (((op_mask & vs2) - (op_mask & vs1) - carry) >> sew) & 0x1u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h new file mode 100644 index 00000000..cc6b9279 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vx.h @@ -0,0 +1,2 @@ +// vmsbc.vx vd, vs2, rs1 +#include "vmsbc_vxm.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h new file mode 100644 index 00000000..8cc46bad --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsbc_vxm.h @@ -0,0 +1,5 @@ +// vmsbc.vxm vd, vs2, rs1, v0 +VI_XI_LOOP_CARRY +({ + res = (((op_mask & vs2) - (op_mask & rs1) - carry) >> sew) & 0x1u; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h b/vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h new file mode 100644 index 00000000..6147f6de --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsbf_m.h @@ -0,0 +1,32 @@ +// vmsbf.m vd, vs2, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +require(P.VU.vstart->read() == 0); +require_vm; +require(insn.rd() != insn.rs2()); + +reg_t vl = P.VU.vl->read(); +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); + +bool has_one = false; +for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { + const int midx = i / 64; + const int mpos = i % 64; + const uint64_t mmask = UINT64_C(1) << mpos; \ + + bool vs2_lsb = ((P.VU.elt(rs2_num, midx) >> mpos) & 0x1) == 1; + bool do_mask = (P.VU.elt(0, midx) >> mpos) & 0x1; + + + if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) { + auto &vd = P.VU.elt(rd_num, midx, true); + uint64_t res = 0; + if (!has_one && !vs2_lsb) { + res = 1; + } else if(!has_one && vs2_lsb) { + has_one = true; + } + vd = (vd & ~mmask) | ((res << mpos) & mmask); + } +} diff --git a/vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h new file mode 100644 index 00000000..cfc16825 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmseq_vi.h @@ -0,0 +1,5 @@ +// vseq.vi vd, vs2, simm5 +VI_VI_LOOP_CMP +({ + res = simm5 == vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h new file mode 100644 index 00000000..91fd204a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmseq_vv.h @@ -0,0 +1,6 @@ +// vseq.vv vd, vs2, vs1 +VI_VV_LOOP_CMP +({ + res = vs2 == vs1; +}) + diff --git a/vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h new file mode 100644 index 00000000..ab633231 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmseq_vx.h @@ -0,0 +1,5 @@ +// vseq.vx vd, vs2, rs1 +VI_VX_LOOP_CMP +({ + res = rs1 == vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h new file mode 100644 index 00000000..4f7dea8e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vi.h @@ -0,0 +1,5 @@ +// vsgt.vi vd, vs2, simm5 +VI_VI_LOOP_CMP +({ + res = vs2 > simm5; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h new file mode 100644 
index 00000000..5f24db69 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsgt_vx.h @@ -0,0 +1,5 @@ +// vsgt.vx vd, vs2, rs1 +VI_VX_LOOP_CMP +({ + res = vs2 > rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h new file mode 100644 index 00000000..be28fee1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vi.h @@ -0,0 +1,5 @@ +// vmsgtu.vi vd, vd2, simm5 +VI_VI_ULOOP_CMP +({ + res = vs2 > (insn.v_simm5() & (UINT64_MAX >> (64 - P.VU.vsew))); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h new file mode 100644 index 00000000..7f398008 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsgtu_vx.h @@ -0,0 +1,5 @@ +// vsgtu.vx vd, vs2, rs1 +VI_VX_ULOOP_CMP +({ + res = vs2 > rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsif_m.h b/vendor/riscv-isa-sim/riscv/insns/vmsif_m.h new file mode 100644 index 00000000..447813fe --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsif_m.h @@ -0,0 +1,32 @@ +// vmsif.m rd, vs2, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +require(P.VU.vstart->read() == 0); +require_vm; +require(insn.rd() != insn.rs2()); + +reg_t vl = P.VU.vl->read(); +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); + +bool has_one = false; +for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { + const int midx = i / 64; + const int mpos = i % 64; + const uint64_t mmask = UINT64_C(1) << mpos; \ + + bool vs2_lsb = ((P.VU.elt(rs2_num, midx ) >> mpos) & 0x1) == 1; + bool do_mask = (P.VU.elt(0, midx) >> mpos) & 0x1; + + if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) { + auto &vd = P.VU.elt(rd_num, midx, true); + uint64_t res = 0; + if (!has_one && !vs2_lsb) { + res = 1; + } else if(!has_one && vs2_lsb) { + has_one = true; + res = 1; + } + vd = (vd & ~mmask) | ((res << mpos) & mmask); + } +} diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h new file mode 100644 index 00000000..f0f67d02 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsle_vi.h @@ -0,0 +1,5 @@ +// vsle.vi vd, vs2, simm5 +VI_VI_LOOP_CMP +({ + res = vs2 <= simm5; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h new file mode 100644 index 00000000..30aba06d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsle_vv.h @@ -0,0 +1,5 @@ +// vsle.vv vd, vs2, vs1 +VI_VV_LOOP_CMP +({ + res = vs2 <= vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h new file mode 100644 index 00000000..c26d5969 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsle_vx.h @@ -0,0 +1,5 @@ +// vsle.vx vd, vs2, rs1 +VI_VX_LOOP_CMP +({ + res = vs2 <= rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h new file mode 100644 index 00000000..0e66b781 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vi.h @@ -0,0 +1,5 @@ +// vmsleu.vi vd, vs2, simm5 +VI_VI_ULOOP_CMP +({ + res = vs2 <= (insn.v_simm5() & (UINT64_MAX >> (64 - P.VU.vsew))); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h new file mode 100644 index 00000000..0e460326 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vv.h @@ -0,0 +1,5 @@ +// vsleu.vv vd, vs2, vs1 +VI_VV_ULOOP_CMP +({ + res = vs2 <= vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h 
b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h new file mode 100644 index 00000000..935b1768 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsleu_vx.h @@ -0,0 +1,5 @@ +// vsleu.vx vd, vs2, rs1 +VI_VX_ULOOP_CMP +({ + res = vs2 <= rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h new file mode 100644 index 00000000..71e6f87f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmslt_vv.h @@ -0,0 +1,5 @@ +// vslt.vv vd, vd2, vs1 +VI_VV_LOOP_CMP +({ + res = vs2 < vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h new file mode 100644 index 00000000..b32bb145 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmslt_vx.h @@ -0,0 +1,5 @@ +// vslt.vx vd, vs2, vs1 +VI_VX_LOOP_CMP +({ + res = vs2 < rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h new file mode 100644 index 00000000..53a570ae --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vv.h @@ -0,0 +1,5 @@ +// vsltu.vv vd, vs2, vs1 +VI_VV_ULOOP_CMP +({ + res = vs2 < vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h new file mode 100644 index 00000000..80825448 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsltu_vx.h @@ -0,0 +1,5 @@ +// vsltu.vx vd, vs2, vs1 +VI_VX_ULOOP_CMP +({ + res = vs2 < rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h b/vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h new file mode 100644 index 00000000..5e9758ef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsne_vi.h @@ -0,0 +1,5 @@ +// vsne.vi vd, vs2, simm5 +VI_VI_LOOP_CMP +({ + res = vs2 != simm5; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h new file mode 100644 index 00000000..e6a7174a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsne_vv.h @@ -0,0 +1,5 @@ +// vneq.vv vd, vs2, vs1 +VI_VV_LOOP_CMP +({ + res = vs2 != vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h new file mode 100644 index 00000000..9e4c1553 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsne_vx.h @@ -0,0 +1,5 @@ +// vsne.vx vd, vs2, rs1 +VI_VX_LOOP_CMP +({ + res = vs2 != rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmsof_m.h b/vendor/riscv-isa-sim/riscv/insns/vmsof_m.h new file mode 100644 index 00000000..b9edcf3b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmsof_m.h @@ -0,0 +1,30 @@ +// vmsof.m rd, vs2, vm +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +require_vector(true); +require(P.VU.vstart->read() == 0); +require_vm; +require(insn.rd() != insn.rs2()); + +reg_t vl = P.VU.vl->read(); +reg_t rd_num = insn.rd(); +reg_t rs2_num = insn.rs2(); + +bool has_one = false; +for (reg_t i = P.VU.vstart->read() ; i < vl; ++i) { + const int midx = i / 64; + const int mpos = i % 64; + const uint64_t mmask = UINT64_C(1) << mpos; \ + + bool vs2_lsb = ((P.VU.elt(rs2_num, midx ) >> mpos) & 0x1) == 1; + bool do_mask = (P.VU.elt(0, midx) >> mpos) & 0x1; + + if (insn.v_vm() == 1 || (insn.v_vm() == 0 && do_mask)) { + uint64_t &vd = P.VU.elt(rd_num, midx, true); + uint64_t res = 0; + if(!has_one && vs2_lsb) { + has_one = true; + res = 1; + } + vd = (vd & ~mmask) | ((res << mpos) & mmask); + } +} diff --git a/vendor/riscv-isa-sim/riscv/insns/vmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmul_vv.h new file mode 100644 index 00000000..a3278171 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vmul_vv.h @@ -0,0 +1,5 @@ +// vmul vd, vs2, vs1 +VI_VV_LOOP +({ + vd = vs2 * vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmul_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmul_vx.h new file mode 100644 index 00000000..8d683902 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmul_vx.h @@ -0,0 +1,5 @@ +// vmul vd, vs2, rs1 +VI_VX_LOOP +({ + vd = vs2 * rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h new file mode 100644 index 00000000..e861a339 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmulh_vv.h @@ -0,0 +1,5 @@ +// vmulh vd, vs2, vs1 +VI_VV_LOOP +({ + vd = ((int128_t)vs2 * vs1) >> sew; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h new file mode 100644 index 00000000..b6b55036 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmulh_vx.h @@ -0,0 +1,5 @@ +// vmulh vd, vs2, rs1 +VI_VX_LOOP +({ + vd = ((int128_t)vs2 * rs1) >> sew; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h new file mode 100644 index 00000000..e1c0ba60 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vv.h @@ -0,0 +1,4 @@ +// vmulhsu.vv vd, vs2, vs1 +VI_VV_SU_LOOP({ + vd = ((int128_t)vs2 * (uint128_t)vs1) >> sew; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h new file mode 100644 index 00000000..4619ea89 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmulhsu_vx.h @@ -0,0 +1,4 @@ +// vmulhsu.vx vd, vs2, rs1 +VI_VX_SU_LOOP({ + vd = ((int128_t)vs2 * (uint128_t)rs1) >> sew; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h new file mode 100644 index 00000000..8e318edb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vv.h @@ -0,0 +1,5 @@ +// vmulhu vd ,vs2, vs1 +VI_VV_ULOOP +({ + vd = ((uint128_t)vs2 * vs1) >> sew; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h new file mode 100644 index 00000000..672ad32d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmulhu_vx.h @@ -0,0 +1,5 @@ +// vmulhu vd ,vs2, rs1 +VI_VX_ULOOP +({ + vd = ((uint128_t)vs2 * rs1) >> sew; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h new file mode 100644 index 00000000..bbdeab9a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv1r_v.h @@ -0,0 +1,2 @@ +// vmv1r.v vd, vs2 +#include "vmvnfr_v.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h new file mode 100644 index 00000000..1ac8e09e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv2r_v.h @@ -0,0 +1,2 @@ +// vmv2r.v vd, vs2 +#include "vmvnfr_v.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h new file mode 100644 index 00000000..2068731a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv4r_v.h @@ -0,0 +1,2 @@ +// vmv4r.v vd, vs2 +#include "vmvnfr_v.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h new file mode 100644 index 00000000..2b205fc7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv8r_v.h @@ -0,0 +1,2 @@ +// vmv8r.v vd, vs2 +#include "vmvnfr_v.h" diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h b/vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h new file mode 100644 index 00000000..b66855be --- 
/dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv_s_x.h @@ -0,0 +1,29 @@ +// vmv_s_x: vd[0] = rs1 +require_vector(true); +require(insn.v_vm() == 1); +require(P.VU.vsew >= e8 && P.VU.vsew <= e64); +reg_t vl = P.VU.vl->read(); + +if (vl > 0 && P.VU.vstart->read() < vl) { + reg_t rd_num = insn.rd(); + reg_t sew = P.VU.vsew; + + switch(sew) { + case e8: + P.VU.elt(rd_num, 0, true) = RS1; + break; + case e16: + P.VU.elt(rd_num, 0, true) = RS1; + break; + case e32: + P.VU.elt(rd_num, 0, true) = RS1; + break; + default: + P.VU.elt(rd_num, 0, true) = RS1; + break; + } + + vl = 0; +} + +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h b/vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h new file mode 100644 index 00000000..3d5737e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv_v_i.h @@ -0,0 +1,5 @@ +// vmv.v.i vd, simm5 +VI_VI_MERGE_LOOP +({ + vd = simm5; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h b/vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h new file mode 100644 index 00000000..429f5a33 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv_v_v.h @@ -0,0 +1,5 @@ +// vvmv.v.v vd, vs1 +VI_VV_MERGE_LOOP +({ + vd = vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h b/vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h new file mode 100644 index 00000000..1eac782e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv_v_x.h @@ -0,0 +1,5 @@ +// vmv.v.x vd, rs1 +VI_VX_MERGE_LOOP +({ + vd = rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h b/vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h new file mode 100644 index 00000000..d33c3e5d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmv_x_s.h @@ -0,0 +1,27 @@ +// vmv_x_s: rd = vs2[0] +require_vector(true); +require(insn.v_vm() == 1); +uint64_t xmask = UINT64_MAX >> (64 - P.get_isa().get_max_xlen()); +reg_t rs1 = RS1; +reg_t sew = P.VU.vsew; +reg_t rs2_num = insn.rs2(); + +switch(sew) { +case e8: + WRITE_RD(P.VU.elt(rs2_num, 0)); + break; +case e16: + WRITE_RD(P.VU.elt(rs2_num, 0)); + break; +case e32: + WRITE_RD(P.VU.elt(rs2_num, 0)); + break; +case e64: + if (P.get_isa().get_max_xlen() <= sew) + WRITE_RD(P.VU.elt(rs2_num, 0) & xmask); + else + WRITE_RD(P.VU.elt(rs2_num, 0)); + break; +} + +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h b/vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h new file mode 100644 index 00000000..f6dc2c08 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmvnfr_v.h @@ -0,0 +1,28 @@ +// vmv1r.v vd, vs2 +require_vector_novtype(true, true); +const reg_t baseAddr = RS1; +const reg_t vd = insn.rd(); +const reg_t vs2 = insn.rs2(); +const reg_t len = insn.rs1() + 1; +require_align(vd, len); +require_align(vs2, len); +const reg_t size = len * P.VU.vlenb; +const reg_t start = P.VU.vstart->read() * (P.VU.vsew >> 3); + +//register needs one-by-one copy to keep commitlog correct +if (vd != vs2 && start < size) { + reg_t i = start / P.VU.vlenb; + reg_t off = start % P.VU.vlenb; + if (off) { + memcpy(&P.VU.elt(vd + i, off, true), + &P.VU.elt(vs2 + i, off), P.VU.vlenb - off); + i++; + } + + for (; i < len; ++i) { + memcpy(&P.VU.elt(vd + i, 0, true), + &P.VU.elt(vs2 + i, 0), P.VU.vlenb); + } +} + +P.VU.vstart->write(0); diff --git a/vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h new file mode 100644 index 00000000..0736d5b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmxnor_mm.h @@ -0,0 +1,2 @@ +// vmnxor.mm vd, vs2, vs1 +VI_LOOP_MASK(~(vs2 ^ vs1)); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h b/vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h new file mode 100644 index 00000000..7f0c576e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vmxor_mm.h @@ -0,0 +1,2 @@ +// vmxor.mm vd, vs2, vs1 +VI_LOOP_MASK(vs2 ^ vs1); diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h new file mode 100644 index 00000000..ea6898cf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnclip_wi.h @@ -0,0 +1,25 @@ +// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> simm) +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +VI_VI_LOOP_NARROW +({ + int128_t result = vs2; + unsigned shift = zimm5 & ((sew * 2) - 1); + + // rounding + INT_ROUNDING(result, xrm, shift); + + result = result >> shift; + + // saturation + if (result < int_min) { + result = int_min; + P_SET_OV(1); + } else if (result > int_max) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h new file mode 100644 index 00000000..63b84c65 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnclip_wv.h @@ -0,0 +1,25 @@ +// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> vs1[i]) +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +VI_VV_LOOP_NARROW +({ + int128_t result = vs2; + unsigned shift = vs1 & ((sew * 2) - 1); + + // rounding + INT_ROUNDING(result, xrm, shift); + + result = result >> shift; + + // saturation + if (result < int_min) { + result = int_min; + P_SET_OV(1); + } else if (result > int_max) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h new file mode 100644 index 00000000..482eace4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnclip_wx.h @@ -0,0 +1,25 @@ +// vnclip: vd[i] = clip(round(vs2[i] + rnd) >> rs1[i]) +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +VI_VX_LOOP_NARROW +({ + int128_t result = vs2; + unsigned shift = rs1 & ((sew * 2) - 1); + + // rounding + INT_ROUNDING(result, xrm, shift); + + result = result >> shift; + + // saturation + if (result < int_min) { + result = int_min; + P_SET_OV(1); + } else if (result > int_max) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h new file mode 100644 index 00000000..441a3a7d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wi.h @@ -0,0 +1,23 @@ +// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> simm) +VRM xrm = P.VU.get_vround_mode(); +uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew); +uint64_t sign_mask = UINT64_MAX << P.VU.vsew; +VI_VI_LOOP_NARROW +({ + uint128_t result = vs2_u; + unsigned shift = zimm5 & ((sew * 2) - 1); + + // rounding + INT_ROUNDING(result, xrm, shift); + + // unsigned shifting to rs1 + result = result >> shift; + + // saturation + if (result & sign_mask) { + result = uint_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h new file mode 100644 index 00000000..80724899 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wv.h @@ -0,0 +1,22 @@ +// 
vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> vs1[i]) +VRM xrm = P.VU.get_vround_mode(); +uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew); +uint64_t sign_mask = UINT64_MAX << P.VU.vsew; +VI_VV_LOOP_NARROW +({ + uint128_t result = vs2_u; + unsigned shift = vs1 & ((sew * 2) - 1); + + // rounding + INT_ROUNDING(result, xrm, shift); + + result = result >> shift; + + // saturation + if (result & sign_mask) { + result = uint_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h new file mode 100644 index 00000000..b2d91c33 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnclipu_wx.h @@ -0,0 +1,22 @@ +// vnclipu: vd[i] = clip(round(vs2[i] + rnd) >> rs1[i]) +VRM xrm = P.VU.get_vround_mode(); +uint64_t uint_max = UINT64_MAX >> (64 - P.VU.vsew); +uint64_t sign_mask = UINT64_MAX << P.VU.vsew; +VI_VX_LOOP_NARROW +({ + uint128_t result = vs2_u; + unsigned shift = rs1 & ((sew * 2) - 1); + + // rounding + INT_ROUNDING(result, xrm, shift); + + result = result >> shift; + + // saturation + if (result & sign_mask) { + result = uint_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h new file mode 100644 index 00000000..7c10f29a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vv.h @@ -0,0 +1,5 @@ +// vmsac.vv: vd[i] = -(vs1[i] * vs2[i]) + vd[i] +VI_VV_LOOP +({ + vd = -(vs1 * vs2) + vd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h new file mode 100644 index 00000000..44920be4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnmsac_vx.h @@ -0,0 +1,5 @@ +// vmsac: vd[i] = -(x[rs1] * vs2[i]) + vd[i] +VI_VX_LOOP +({ + vd = -(rs1 * vs2) + vd; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h new file mode 100644 index 00000000..37f82286 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vv.h @@ -0,0 +1,5 @@ +// vnmsub.vv: vd[i] = -(vd[i] * vs1[i]) + vs2[i] +VI_VV_LOOP +({ + vd = -(vd * vs1) + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h new file mode 100644 index 00000000..2e00d22e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnmsub_vx.h @@ -0,0 +1,5 @@ +// vnmsub.vx: vd[i] = -(vd[i] * x[rs1]) + vs2[i] +VI_VX_LOOP +({ + vd = -(vd * rs1) + vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h b/vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h new file mode 100644 index 00000000..0502ff1a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnsra_wi.h @@ -0,0 +1,5 @@ +// vnsra.vi vd, vs2, zimm5 +VI_VI_LOOP_NSHIFT +({ + vd = vs2 >> (zimm5 & (sew * 2 - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h new file mode 100644 index 00000000..555ce3fb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnsra_wv.h @@ -0,0 +1,5 @@ +// vnsra.vv vd, vs2, vs1 +VI_VV_LOOP_NSHIFT +({ + vd = vs2 >> (vs1 & (sew * 2 - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h new file mode 100644 index 00000000..05a55e3e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnsra_wx.h @@ -0,0 +1,5 @@ +// vnsra.vx vd, vs2, rs1 +VI_VX_LOOP_NSHIFT +({ + vd = vs2 >> (rs1 & (sew * 2 - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h 
b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h new file mode 100644 index 00000000..d4dfcf07 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wi.h @@ -0,0 +1,5 @@ +// vnsrl.vi vd, vs2, zimm5 +VI_VI_LOOP_NSHIFT +({ + vd = vs2_u >> (zimm5 & (sew * 2 - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h new file mode 100644 index 00000000..ab72b849 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wv.h @@ -0,0 +1,5 @@ +// vnsrl.vv vd, vs2, vs1 +VI_VV_LOOP_NSHIFT +({ + vd = vs2_u >> (vs1 & (sew * 2 - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h new file mode 100644 index 00000000..e149b38d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vnsrl_wx.h @@ -0,0 +1,5 @@ +// vnsrl.vx vd, vs2, rs1 +VI_VX_LOOP_NSHIFT +({ + vd = vs2_u >> (rs1 & (sew * 2 - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vor_vi.h b/vendor/riscv-isa-sim/riscv/insns/vor_vi.h new file mode 100644 index 00000000..f7596074 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vor_vi.h @@ -0,0 +1,5 @@ +// vor +VI_VI_LOOP +({ + vd = simm5 | vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vor_vv.h b/vendor/riscv-isa-sim/riscv/insns/vor_vv.h new file mode 100644 index 00000000..0c460662 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vor_vv.h @@ -0,0 +1,5 @@ +// vor +VI_VV_LOOP +({ + vd = vs1 | vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vor_vx.h b/vendor/riscv-isa-sim/riscv/insns/vor_vx.h new file mode 100644 index 00000000..01c003ab --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vor_vx.h @@ -0,0 +1,5 @@ +// vor +VI_VX_LOOP +({ + vd = rs1 | vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredand_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredand_vs.h new file mode 100644 index 00000000..6c2d9089 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredand_vs.h @@ -0,0 +1,5 @@ +// vredand.vs vd, vs2 ,vs1 +VI_VV_LOOP_REDUCTION +({ + vd_0_res &= vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h new file mode 100644 index 00000000..be2e76ab --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredmax_vs.h @@ -0,0 +1,5 @@ +// vredmax.vs vd, vs2 ,vs1 +VI_VV_LOOP_REDUCTION +({ + vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h new file mode 100644 index 00000000..960f4861 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredmaxu_vs.h @@ -0,0 +1,5 @@ +// vredmaxu.vs vd, vs2 ,vs1 +VI_VV_ULOOP_REDUCTION +({ + vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h new file mode 100644 index 00000000..50359b7a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredmin_vs.h @@ -0,0 +1,5 @@ +// vredmin.vs vd, vs2 ,vs1 +VI_VV_LOOP_REDUCTION +({ + vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h new file mode 100644 index 00000000..70824759 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredminu_vs.h @@ -0,0 +1,5 @@ +// vredminu.vs vd, vs2 ,vs1 +VI_VV_ULOOP_REDUCTION +({ + vd_0_res = (vd_0_res <= vs2) ? 
vd_0_res : vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredor_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredor_vs.h new file mode 100644 index 00000000..f7acd9aa --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredor_vs.h @@ -0,0 +1,5 @@ +// vredor.vs vd, vs2 ,vs1 +VI_VV_LOOP_REDUCTION +({ + vd_0_res |= vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h new file mode 100644 index 00000000..c4fefe57 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredsum_vs.h @@ -0,0 +1,5 @@ +// vredsum.vs vd, vs2 ,vs1 +VI_VV_LOOP_REDUCTION +({ + vd_0_res += vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h b/vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h new file mode 100644 index 00000000..bb81ad9a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vredxor_vs.h @@ -0,0 +1,5 @@ +// vredxor.vs vd, vs2 ,vs1 +VI_VV_LOOP_REDUCTION +({ + vd_0_res ^= vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vrem_vv.h b/vendor/riscv-isa-sim/riscv/insns/vrem_vv.h new file mode 100644 index 00000000..260716a0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrem_vv.h @@ -0,0 +1,11 @@ +// vrem.vv vd, vs2, vs1 +VI_VV_LOOP +({ + if (vs1 == 0) + vd = vs2; + else if(vs2 == -(((intmax_t)1) << (sew - 1)) && vs1 == -1) + vd = 0; + else { + vd = vs2 % vs1; + } +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vrem_vx.h b/vendor/riscv-isa-sim/riscv/insns/vrem_vx.h new file mode 100644 index 00000000..3702f02f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrem_vx.h @@ -0,0 +1,10 @@ +// vrem.vx vd, vs2, rs1 +VI_VX_LOOP +({ + if (rs1 == 0) + vd = vs2; + else if (vs2 == -(((intmax_t)1) << (sew - 1)) && rs1 == -1) + vd = 0; + else + vd = vs2 % rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vremu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vremu_vv.h new file mode 100644 index 00000000..7e150723 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vremu_vv.h @@ -0,0 +1,8 @@ +// vremu.vv vd, vs2, vs1 +VI_VV_ULOOP +({ + if (vs1 == 0) + vd = vs2; + else + vd = vs2 % vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vremu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vremu_vx.h new file mode 100644 index 00000000..a87a8200 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vremu_vx.h @@ -0,0 +1,8 @@ +// vremu.vx vd, vs2, rs1 +VI_VX_ULOOP +({ + if (rs1 == 0) + vd = vs2; + else + vd = vs2 % rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h b/vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h new file mode 100644 index 00000000..56e11e16 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgather_vi.h @@ -0,0 +1,30 @@ +// vrgather.vi vd, vs2, zimm5 vm # vd[i] = (zimm5 >= VLMAX) ? 0 : vs2[zimm5]; +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require(insn.rd() != insn.rs2()); +require_vm; + +reg_t zimm5 = insn.v_zimm5(); + +VI_LOOP_BASE + +for (reg_t i = P.VU.vstart->read(); i < vl; ++i) { + VI_LOOP_ELEMENT_SKIP(); + + switch (sew) { + case e8: + P.VU.elt(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, zimm5); + break; + case e16: + P.VU.elt(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, zimm5); + break; + case e32: + P.VU.elt(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, zimm5); + break; + default: + P.VU.elt(rd_num, i, true) = zimm5 >= P.VU.vlmax ? 
0 : P.VU.elt(rs2_num, zimm5); + break; + } +} + +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h b/vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h new file mode 100644 index 00000000..a3a32f56 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgather_vv.h @@ -0,0 +1,32 @@ +// vrgather.vv vd, vs2, vs1, vm # vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require_align(insn.rs1(), P.VU.vflmul); +require(insn.rd() != insn.rs2() && insn.rd() != insn.rs1()); +require_vm; + +VI_LOOP_BASE + switch (sew) { + case e8: { + auto vs1 = P.VU.elt(rs1_num, i); + //if (i > 255) continue; + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e16: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e32: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + default: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h b/vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h new file mode 100644 index 00000000..058ffae1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgather_vx.h @@ -0,0 +1,24 @@ +// vrgather.vx vd, vs2, rs1, vm # vd[i] = (rs1 >= VLMAX) ? 0 : vs2[rs1]; +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require(insn.rd() != insn.rs2()); +require_vm; + +reg_t rs1 = RS1; + +VI_LOOP_BASE + switch (sew) { + case e8: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + case e16: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + case e32: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + default: + P.VU.elt(rd_num, i, true) = rs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, rs1); + break; + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h b/vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h new file mode 100644 index 00000000..3bb166a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrgatherei16_vv.h @@ -0,0 +1,34 @@ +// vrgatherei16.vv vd, vs2, vs1, vm # vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; +float vemul = (16.0 / P.VU.vsew * P.VU.vflmul); +require(vemul >= 0.125 && vemul <= 8); +require_align(insn.rd(), P.VU.vflmul); +require_align(insn.rs2(), P.VU.vflmul); +require_align(insn.rs1(), vemul); +require_noover(insn.rd(), P.VU.vflmul, insn.rs1(), vemul); +require(insn.rd() != insn.rs2()); +require_vm; + +VI_LOOP_BASE + switch (sew) { + case e8: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e16: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + case e32: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 0 : P.VU.elt(rs2_num, vs1); + break; + } + default: { + auto vs1 = P.VU.elt(rs1_num, i); + P.VU.elt(rd_num, i, true) = vs1 >= P.VU.vlmax ? 
0 : P.VU.elt(rs2_num, vs1); + break; + } + } +VI_LOOP_END; diff --git a/vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h b/vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h new file mode 100644 index 00000000..198c33f9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrsub_vi.h @@ -0,0 +1,5 @@ +// vrsub.vi vd, vs2, imm, vm # vd[i] = imm - vs2[i] +VI_VI_LOOP +({ + vd = simm5 - vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h new file mode 100644 index 00000000..bfd62594 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vrsub_vx.h @@ -0,0 +1,5 @@ +// vrsub.vx vd, vs2, rs1, vm # vd[i] = rs1 - vs2[i] +VI_VX_LOOP +({ + vd = rs1 - vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vs1r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs1r_v.h new file mode 100644 index 00000000..1932ec0b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs1r_v.h @@ -0,0 +1,2 @@ +// vs1r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vs2r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs2r_v.h new file mode 100644 index 00000000..2e515b47 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs2r_v.h @@ -0,0 +1,2 @@ +// vs2r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vs4r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs4r_v.h new file mode 100644 index 00000000..161bf89a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs4r_v.h @@ -0,0 +1,2 @@ +// vs4r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vs8r_v.h b/vendor/riscv-isa-sim/riscv/insns/vs8r_v.h new file mode 100644 index 00000000..1ad25756 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vs8r_v.h @@ -0,0 +1,2 @@ +// vs8r.v vs3, (rs1) +VI_ST_WHOLE diff --git a/vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h new file mode 100644 index 00000000..7e3b652e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsadd_vi.h @@ -0,0 +1,28 @@ +// vsadd.vi vd, vs2 simm5 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; +switch(sew) { +case e8: { + VI_PARAMS(e8); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +case e16: { + VI_PARAMS(e16); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +case e32: { + VI_PARAMS(e32); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +default: { + VI_PARAMS(e64); + vd = sat_add(vs2, vsext(simm5, sew), sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h new file mode 100644 index 00000000..60ad5f3c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsadd_vv.h @@ -0,0 +1,28 @@ +// vsadd.vv vd, vs2, vs1 +VI_CHECK_SSS(true); +VI_LOOP_BASE +bool sat = false; +switch(sew) { +case e8: { + VV_PARAMS(e8); + vd = sat_add(vs2, vs1, sat); + break; +} +case e16: { + VV_PARAMS(e16); + vd = sat_add(vs2, vs1, sat); + break; +} +case e32: { + VV_PARAMS(e32); + vd = sat_add(vs2, vs1, sat); + break; +} +default: { + VV_PARAMS(e64); + vd = sat_add(vs2, vs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h new file mode 100644 index 00000000..bf68f151 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsadd_vx.h @@ -0,0 +1,28 @@ +// vsadd.vx vd, vs2, rs1 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; +switch(sew) { +case e8: { + VX_PARAMS(e8); + vd = sat_add(vs2, rs1, sat); + break; +} +case e16: { + VX_PARAMS(e16); + vd = sat_add(vs2, rs1, sat); + 
break; +} +case e32: { + VX_PARAMS(e32); + vd = sat_add(vs2, rs1, sat); + break; +} +default: { + VX_PARAMS(e64); + vd = sat_add(vs2, rs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h new file mode 100644 index 00000000..38607140 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vi.h @@ -0,0 +1,11 @@ +// vsaddu vd, vs2, zimm5 +VI_VI_ULOOP +({ + bool sat = false; + vd = vs2 + (insn.v_simm5() & (UINT64_MAX >> (64 - P.VU.vsew))); + + sat = vd < vs2; + vd |= -(vd < vs2); + + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h new file mode 100644 index 00000000..a0cba811 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vv.h @@ -0,0 +1,11 @@ +// vsaddu vd, vs2, vs1 +VI_VV_ULOOP +({ + bool sat = false; + vd = vs2 + vs1; + + sat = vd < vs2; + vd |= -(vd < vs2); + + P_SET_OV(sat); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h new file mode 100644 index 00000000..c0a7d872 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsaddu_vx.h @@ -0,0 +1,12 @@ +// vsaddu vd, vs2, rs1 +VI_VX_ULOOP +({ + bool sat = false; + vd = vs2 + rs1; + + sat = vd < vs2; + vd |= -(vd < vs2); + + P_SET_OV(sat); + +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h b/vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h new file mode 100644 index 00000000..8ab6d446 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsbc_vvm.h @@ -0,0 +1,5 @@ +// vsbc.vvm vd, vs2, rs1, v0 +VI_VV_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & vs2) - (op_mask & vs1) - carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h b/vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h new file mode 100644 index 00000000..fc983218 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsbc_vxm.h @@ -0,0 +1,5 @@ +// vsbc.vxm vd, vs2, rs1, v0 +VI_XI_LOOP_WITH_CARRY +({ + vd = (uint128_t)((op_mask & vs2) - (op_mask & rs1) - carry); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vse16_v.h b/vendor/riscv-isa-sim/riscv/insns/vse16_v.h new file mode 100644 index 00000000..9f9afecb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse16_v.h @@ -0,0 +1,2 @@ +// vse16.v and vsseg[2-8]e16.v +VI_ST(0, (i * nf + fn), uint16, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vse32_v.h b/vendor/riscv-isa-sim/riscv/insns/vse32_v.h new file mode 100644 index 00000000..1c6a2310 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse32_v.h @@ -0,0 +1,2 @@ +// vse32.v and vsseg[2-8]e32.v +VI_ST(0, (i * nf + fn), uint32, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vse64_v.h b/vendor/riscv-isa-sim/riscv/insns/vse64_v.h new file mode 100644 index 00000000..61d0ba64 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse64_v.h @@ -0,0 +1,2 @@ +// vse64.v and vsseg[2-8]e64.v +VI_ST(0, (i * nf + fn), uint64, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vse8_v.h b/vendor/riscv-isa-sim/riscv/insns/vse8_v.h new file mode 100644 index 00000000..01f59ceb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vse8_v.h @@ -0,0 +1,2 @@ +// vse8.v and vsseg[2-8]e8.v +VI_ST(0, (i * nf + fn), uint8, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsetivli.h b/vendor/riscv-isa-sim/riscv/insns/vsetivli.h new file mode 100644 index 00000000..04900a2f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsetivli.h @@ -0,0 +1,2 @@ +require_vector_novtype(false, false); 
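The vsaddu_vi/vsaddu_vv/vsaddu_vx handlers above implement unsigned saturating addition with a branch-free idiom: wrap-around is detected with (vd < vs2) and, when it happened, an all-ones mask is ORed into the result. A minimal standalone sketch of that idiom follows (editor's illustration, not part of the vendored Spike source; it assumes a fixed 64-bit element width instead of the VI_*_ULOOP machinery):

// Editor's illustrative sketch only; not part of the vendored Spike source.
#include <cassert>
#include <cstdint>

// Unsigned saturating add: if the sum wraps around, clamp it to all ones.
static uint64_t sat_addu64(uint64_t a, uint64_t b, bool &sat)
{
  uint64_t r = a + b;
  sat = r < a;                          // wrap-around means the add overflowed
  r |= -static_cast<uint64_t>(r < a);   // all ones on overflow, unchanged otherwise
  return r;
}

int main()
{
  bool sat = false;
  assert(sat_addu64(1, 2, sat) == 3 && !sat);
  assert(sat_addu64(UINT64_MAX, 2, sat) == UINT64_MAX && sat);
  return 0;
}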
+WRITE_RD(P.VU.set_vl(insn.rd(), -1, insn.rs1(), insn.v_zimm10())); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsetvl.h b/vendor/riscv-isa-sim/riscv/insns/vsetvl.h new file mode 100644 index 00000000..2969edc6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsetvl.h @@ -0,0 +1,2 @@ +require_vector_novtype(false, false); +WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, RS2)); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsetvli.h b/vendor/riscv-isa-sim/riscv/insns/vsetvli.h new file mode 100644 index 00000000..7b1f1d71 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsetvli.h @@ -0,0 +1,2 @@ +require_vector_novtype(false, false); +WRITE_RD(P.VU.set_vl(insn.rd(), insn.rs1(), RS1, insn.v_zimm11())); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h b/vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h new file mode 100644 index 00000000..16ccfac6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsext_vf2.h @@ -0,0 +1 @@ +VI_VV_EXT(2, int); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h b/vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h new file mode 100644 index 00000000..d4476a31 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsext_vf4.h @@ -0,0 +1 @@ +VI_VV_EXT(4, int); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h b/vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h new file mode 100644 index 00000000..09fdc2c7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsext_vf8.h @@ -0,0 +1 @@ +VI_VV_EXT(8, int); diff --git a/vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h new file mode 100644 index 00000000..e867722f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslide1down_vx.h @@ -0,0 +1,44 @@ +//vslide1down.vx vd, vs2, rs1 +VI_CHECK_SLIDE(false); + +VI_LOOP_BASE +if (i != vl - 1) { + switch (sew) { + case e8: { + VI_XI_SLIDEDOWN_PARAMS(e8, 1); + vd = vs2; + } + break; + case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, 1); + vd = vs2; + } + break; + case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, 1); + vd = vs2; + } + break; + default: { + VI_XI_SLIDEDOWN_PARAMS(e64, 1); + vd = vs2; + } + break; + } +} else { + switch (sew) { + case e8: + P.VU.elt(rd_num, vl - 1, true) = RS1; + break; + case e16: + P.VU.elt(rd_num, vl - 1, true) = RS1; + break; + case e32: + P.VU.elt(rd_num, vl - 1, true) = RS1; + break; + default: + P.VU.elt(rd_num, vl - 1, true) = RS1; + break; + } +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h new file mode 100644 index 00000000..33cb9ed6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslide1up_vx.h @@ -0,0 +1,30 @@ +//vslide1up.vx vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +VI_LOOP_BASE +if (i != 0) { + if (sew == e8) { + VI_XI_SLIDEUP_PARAMS(e8, 1); + vd = vs2; + } else if(sew == e16) { + VI_XI_SLIDEUP_PARAMS(e16, 1); + vd = vs2; + } else if(sew == e32) { + VI_XI_SLIDEUP_PARAMS(e32, 1); + vd = vs2; + } else if(sew == e64) { + VI_XI_SLIDEUP_PARAMS(e64, 1); + vd = vs2; + } +} else { + if (sew == e8) { + P.VU.elt(rd_num, 0, true) = RS1; + } else if(sew == e16) { + P.VU.elt(rd_num, 0, true) = RS1; + } else if(sew == e32) { + P.VU.elt(rd_num, 0, true) = RS1; + } else if(sew == e64) { + P.VU.elt(rd_num, 0, true) = RS1; + } +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h new file mode 100644 index 00000000..bc440cf2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vi.h @@ -0,0 +1,36 @@ +// vslidedown.vi vd, vs2, rs1 
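The vslide1up_vx and vslide1down_vx handlers above move every active element by one position and write the scalar rs1 into the vacated edge element. A standalone sketch of the vslide1down data movement on a plain array follows (editor's illustration, not part of the vendored Spike source; masking, vstart and tail handling are ignored, and vl is assumed to equal the array length):

// Editor's illustrative sketch only; not part of the vendored Spike source.
#include <cassert>
#include <cstdint>
#include <vector>

// vslide1down.vx on a plain array: vd[i] = vs2[i + 1] for i < vl - 1,
// and the scalar operand lands in the last element.
static std::vector<uint64_t> slide1down(const std::vector<uint64_t> &vs2, uint64_t rs1)
{
  assert(!vs2.empty());
  std::vector<uint64_t> vd(vs2.size());
  for (size_t i = 0; i + 1 < vs2.size(); ++i)
    vd[i] = vs2[i + 1];
  vd.back() = rs1;
  return vd;
}

int main()
{
  std::vector<uint64_t> v{10, 20, 30, 40};
  assert(slide1down(v, 99) == (std::vector<uint64_t>{20, 30, 40, 99}));
  return 0;
}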
+VI_CHECK_SLIDE(false); + +const reg_t sh = insn.v_zimm5(); +VI_LOOP_BASE + +reg_t offset = 0; +bool is_valid = (i + sh) < P.VU.vlmax; + +if (is_valid) { + offset = sh; +} + +switch (sew) { +case e8: { + VI_XI_SLIDEDOWN_PARAMS(e8, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, offset); + vd = is_valid ? vs2 : 0; +} +break; +default: { + VI_XI_SLIDEDOWN_PARAMS(e64, offset); + vd = is_valid ? vs2 : 0; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h new file mode 100644 index 00000000..074aa508 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslidedown_vx.h @@ -0,0 +1,36 @@ +//vslidedown.vx vd, vs2, rs1 +VI_CHECK_SLIDE(false); + +const uint128_t sh = RS1; +VI_LOOP_BASE + +reg_t offset = 0; +bool is_valid = (i + sh) < P.VU.vlmax; + +if (is_valid) { + offset = sh; +} + +switch (sew) { +case e8: { + VI_XI_SLIDEDOWN_PARAMS(e8, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e16: { + VI_XI_SLIDEDOWN_PARAMS(e16, offset); + vd = is_valid ? vs2 : 0; +} +break; +case e32: { + VI_XI_SLIDEDOWN_PARAMS(e32, offset); + vd = is_valid ? vs2 : 0; +} +break; +default: { + VI_XI_SLIDEDOWN_PARAMS(e64, offset); + vd = is_valid ? vs2 : 0; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h b/vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h new file mode 100644 index 00000000..3d537944 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslideup_vi.h @@ -0,0 +1,31 @@ +// vslideup.vi vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +const reg_t offset = insn.v_zimm5(); +VI_LOOP_BASE +if (P.VU.vstart->read() < offset && i < offset) + continue; + +switch (sew) { +case e8: { + VI_XI_SLIDEUP_PARAMS(e8, offset); + vd = vs2; +} +break; +case e16: { + VI_XI_SLIDEUP_PARAMS(e16, offset); + vd = vs2; +} +break; +case e32: { + VI_XI_SLIDEUP_PARAMS(e32, offset); + vd = vs2; +} +break; +default: { + VI_XI_SLIDEUP_PARAMS(e64, offset); + vd = vs2; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h b/vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h new file mode 100644 index 00000000..43d41fb3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vslideup_vx.h @@ -0,0 +1,31 @@ +//vslideup.vx vd, vs2, rs1 +VI_CHECK_SLIDE(true); + +const reg_t offset = RS1; +VI_LOOP_BASE +if (P.VU.vstart->read() < offset && i < offset) + continue; + +switch (sew) { +case e8: { + VI_XI_SLIDEUP_PARAMS(e8, offset); + vd = vs2; +} +break; +case e16: { + VI_XI_SLIDEUP_PARAMS(e16, offset); + vd = vs2; +} +break; +case e32: { + VI_XI_SLIDEUP_PARAMS(e32, offset); + vd = vs2; +} +break; +default: { + VI_XI_SLIDEUP_PARAMS(e64, offset); + vd = vs2; +} +break; +} +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsll_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsll_vi.h new file mode 100644 index 00000000..be465066 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsll_vi.h @@ -0,0 +1,5 @@ +// vsll.vi vd, vs2, zimm5 +VI_VI_LOOP +({ + vd = vs2 << (simm5 & (sew - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsll_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsll_vv.h new file mode 100644 index 00000000..ce820225 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsll_vv.h @@ -0,0 +1,5 @@ +// vsll +VI_VV_LOOP +({ + vd = vs2 << (vs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsll_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsll_vx.h new file 
mode 100644 index 00000000..823510b2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsll_vx.h @@ -0,0 +1,5 @@ +// vsll +VI_VX_LOOP +({ + vd = vs2 << (rs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsm_v.h b/vendor/riscv-isa-sim/riscv/insns/vsm_v.h new file mode 100644 index 00000000..e1d468be --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsm_v.h @@ -0,0 +1,2 @@ +// vse1.v +VI_ST(0, (i * nf + fn), uint8, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h new file mode 100644 index 00000000..413981ce --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsmul_vv.h @@ -0,0 +1,32 @@ +// vsmul.vv vd, vs2, vs1 +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1); + +VI_VV_LOOP +({ + int64_t vs1_sign; + int64_t vs2_sign; + int64_t result_sign; + + vs1_sign = vs1 & sign_mask; + vs2_sign = vs2 & sign_mask; + bool overflow = vs1 == vs2 && vs1 == int_min; + + int128_t result = (int128_t)vs1 * (int128_t)vs2; + result_sign = (vs1_sign ^ vs2_sign) & sign_mask; + + // rounding + INT_ROUNDING(result, xrm, sew - 1); + // remove guard bits + result = result >> (sew - 1); + + // saturation + if (overflow) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h new file mode 100644 index 00000000..2e25670d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsmul_vx.h @@ -0,0 +1,33 @@ +// vsmul.vx vd, vs2, rs1 +VRM xrm = P.VU.get_vround_mode(); +int64_t int_max = INT64_MAX >> (64 - P.VU.vsew); +int64_t int_min = INT64_MIN >> (64 - P.VU.vsew); +int64_t sign_mask = uint64_t(1) << (P.VU.vsew - 1); + +VI_VX_LOOP +({ + int64_t rs1_sign; + int64_t vs2_sign; + int64_t result_sign; + + rs1_sign = rs1 & sign_mask; + vs2_sign = vs2 & sign_mask; + bool overflow = rs1 == vs2 && rs1 == int_min; + + int128_t result = (int128_t)rs1 * (int128_t)vs2; + result_sign = (rs1_sign ^ vs2_sign) & sign_mask; + + // rounding + INT_ROUNDING(result, xrm, sew - 1); + + // remove guard bits + result = result >> (sew - 1); + + // max saturation + if (overflow) { + result = int_max; + P_SET_OV(1); + } + + vd = result; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h new file mode 100644 index 00000000..42c3c78d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei16_v.h @@ -0,0 +1,2 @@ +// vsxei16.v and vsxseg[2-8]ei16.v +VI_ST_INDEX(e16, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h new file mode 100644 index 00000000..f0aed6bd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei32_v.h @@ -0,0 +1,2 @@ +// vsxei32.v and vsxseg[2-8]ei32.v +VI_ST_INDEX(e32, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h new file mode 100644 index 00000000..88ddaf3f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei64_v.h @@ -0,0 +1,2 @@ +// vsxei64.v and vsxseg[2-8]ei64.v +VI_ST_INDEX(e64, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h new file mode 100644 index 00000000..621512c5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsoxei8_v.h @@ -0,0 +1,2 @@ +// vsxei8.v and vsxseg[2-8]ei8.v +VI_ST_INDEX(e8, true); diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vsra_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsra_vi.h new file mode 100644 index 00000000..5c589274 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsra_vi.h @@ -0,0 +1,5 @@ +// vsra.vi vd, vs2, zimm5 +VI_VI_LOOP +({ + vd = vs2 >> (simm5 & (sew - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsra_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsra_vv.h new file mode 100644 index 00000000..8889af9c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsra_vv.h @@ -0,0 +1,5 @@ +// vsra.vv vd, vs2, vs1 +VI_VV_LOOP +({ + vd = vs2 >> (vs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsra_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsra_vx.h new file mode 100644 index 00000000..c1b0c107 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsra_vx.h @@ -0,0 +1,5 @@ +// vsra.vx vd, vs2, rs1 +VI_VX_LOOP +({ + vd = vs2 >> (rs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h b/vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h new file mode 100644 index 00000000..fe5d2720 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsrl_vi.h @@ -0,0 +1,5 @@ +// vsrl.vi vd, vs2, zimm5 +VI_VI_ULOOP +({ + vd = vs2 >> (zimm5 & (sew - 1) & 0x1f); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h new file mode 100644 index 00000000..6376af36 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsrl_vv.h @@ -0,0 +1,5 @@ +// vsrl.vv vd, vs2, vs1 +VI_VV_ULOOP +({ + vd = vs2 >> (vs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h new file mode 100644 index 00000000..a4f899ca --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsrl_vx.h @@ -0,0 +1,5 @@ +// vsrl.vx vd, vs2, rs1 +VI_VX_ULOOP +({ + vd = vs2 >> (rs1 & (sew - 1)); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse16_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse16_v.h new file mode 100644 index 00000000..5dcbaf9f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse16_v.h @@ -0,0 +1,2 @@ +// vsse16v and vssseg[2-8]e16.v +VI_ST(i * RS2, fn, uint16, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse32_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse32_v.h new file mode 100644 index 00000000..80276b25 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse32_v.h @@ -0,0 +1,2 @@ +// vsse32.v and vssseg[2-8]e32.v +VI_ST(i * RS2, fn, uint32, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse64_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse64_v.h new file mode 100644 index 00000000..a4b6290b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse64_v.h @@ -0,0 +1,2 @@ +// vsse64.v and vssseg[2-8]e64.v +VI_ST(i * RS2, fn, uint64, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsse8_v.h b/vendor/riscv-isa-sim/riscv/insns/vsse8_v.h new file mode 100644 index 00000000..5ba3ccec --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsse8_v.h @@ -0,0 +1,2 @@ +// vsse8.v and vssseg[2-8]e8.v +VI_ST(i * RS2, fn, uint8, false); diff --git a/vendor/riscv-isa-sim/riscv/insns/vssra_vi.h b/vendor/riscv-isa-sim/riscv/insns/vssra_vi.h new file mode 100644 index 00000000..ff2e1c58 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssra_vi.h @@ -0,0 +1,10 @@ +// vssra.vi vd, vs2, simm5 +VRM xrm = P.VU.get_vround_mode(); +VI_VI_LOOP +({ + int sh = simm5 & (sew - 1) & 0x1f; + int128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssra_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssra_vv.h 
new file mode 100644 index 00000000..7bbc766f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssra_vv.h @@ -0,0 +1,10 @@ +// vssra.vv vd, vs2, vs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VV_LOOP +({ + int sh = vs1 & (sew - 1); + int128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssra_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssra_vx.h new file mode 100644 index 00000000..068a22b6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssra_vx.h @@ -0,0 +1,10 @@ +// vssra.vx vd, vs2, rs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VX_LOOP +({ + int sh = rs1 & (sew - 1); + int128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h b/vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h new file mode 100644 index 00000000..d125164d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssrl_vi.h @@ -0,0 +1,10 @@ +// vssra.vi vd, vs2, simm5 +VRM xrm = P.VU.get_vround_mode(); +VI_VI_ULOOP +({ + int sh = zimm5 & (sew - 1) & 0x1f; + uint128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h new file mode 100644 index 00000000..a8e5d164 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssrl_vv.h @@ -0,0 +1,10 @@ +// vssrl.vv vd, vs2, vs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VV_ULOOP +({ + int sh = vs1 & (sew - 1); + uint128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h new file mode 100644 index 00000000..ee3cb346 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssrl_vx.h @@ -0,0 +1,10 @@ +// vssrl.vx vd, vs2, rs1 +VRM xrm = P.VU.get_vround_mode(); +VI_VX_ULOOP +({ + int sh = rs1 & (sew - 1); + uint128_t val = vs2; + + INT_ROUNDING(val, xrm, sh); + vd = val >> sh; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vssub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssub_vv.h new file mode 100644 index 00000000..d55df238 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssub_vv.h @@ -0,0 +1,29 @@ +// vssub.vv vd, vs2, vs1 +VI_CHECK_SSS(true); +VI_LOOP_BASE +bool sat = false; + +switch (sew) { +case e8: { + VV_PARAMS(e8); + vd = sat_sub(vs2, vs1, sat); + break; +} +case e16: { + VV_PARAMS(e16); + vd = sat_sub(vs2, vs1, sat); + break; +} +case e32: { + VV_PARAMS(e32); + vd = sat_sub(vs2, vs1, sat); + break; +} +default: { + VV_PARAMS(e64); + vd = sat_sub(vs2, vs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vssub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssub_vx.h new file mode 100644 index 00000000..cbfa2880 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssub_vx.h @@ -0,0 +1,29 @@ +// vssub.vx vd, vs2, rs1 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; + +switch (sew) { +case e8: { + VX_PARAMS(e8); + vd = sat_sub(vs2, rs1, sat); + break; +} +case e16: { + VX_PARAMS(e16); + vd = sat_sub(vs2, rs1, sat); + break; +} +case e32: { + VX_PARAMS(e32); + vd = sat_sub(vs2, rs1, sat); + break; +} +default: { + VX_PARAMS(e64); + vd = sat_sub(vs2, rs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h new file mode 100644 index 00000000..667a2c51 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssubu_vv.h @@ -0,0 +1,30 @@ +// vssubu.vv vd, vs2, vs1 
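The vssra_* and vssrl_* handlers above perform a scaling shift: the value is first rounded according to the vxrm rounding mode (the INT_ROUNDING macro) and then shifted right. A standalone sketch of the default round-to-nearest-up behaviour follows (editor's illustration, not part of the vendored Spike source; the real code widens to 128 bits first so the rounding increment cannot overflow):

// Editor's illustrative sketch only; not part of the vendored Spike source.
// Assumes arithmetic right shift of negative values, as on common targets.
#include <cassert>
#include <cstdint>

// Round-to-nearest-up (vxrm = rnu) before an arithmetic right shift:
// add half of the discarded weight, then shift the low sh bits away.
static int64_t sra_round_rnu(int64_t val, unsigned sh)
{
  if (sh == 0)
    return val;
  int64_t half = int64_t(1) << (sh - 1);
  return (val + half) >> sh;
}

int main()
{
  assert(sra_round_rnu(5, 1) == 3);    // 2.5 rounds up to 3
  assert(sra_round_rnu(4, 1) == 2);    // 2.0 stays 2
  assert(sra_round_rnu(-5, 1) == -2);  // -2.5 rounds up (toward +inf) to -2
  return 0;
}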
+VI_CHECK_SSS(true); +VI_LOOP_BASE +bool sat = false; + +switch (sew) { +case e8: { + VV_U_PARAMS(e8); + vd = sat_subu(vs2, vs1, sat); + break; +} +case e16: { + VV_U_PARAMS(e16); + vd = sat_subu(vs2, vs1, sat); + break; +} +case e32: { + VV_U_PARAMS(e32); + vd = sat_subu(vs2, vs1, sat); + break; +} +default: { + VV_U_PARAMS(e64); + vd = sat_subu(vs2, vs1, sat); + break; +} +} +P_SET_OV(sat); + +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h new file mode 100644 index 00000000..603f35e7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vssubu_vx.h @@ -0,0 +1,29 @@ +// vssubu.vx vd, vs2, rs1 +VI_CHECK_SSS(false); +VI_LOOP_BASE +bool sat = false; + +switch (sew) { +case e8: { + VX_U_PARAMS(e8); + vd = sat_subu(vs2, rs1, sat); + break; +} +case e16: { + VX_U_PARAMS(e16); + vd = sat_subu(vs2, rs1, sat); + break; +} +case e32: { + VX_U_PARAMS(e32); + vd = sat_subu(vs2, rs1, sat); + break; +} +default: { + VX_U_PARAMS(e64); + vd = sat_subu(vs2, rs1, sat); + break; +} +} +P_SET_OV(sat); +VI_LOOP_END diff --git a/vendor/riscv-isa-sim/riscv/insns/vsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vsub_vv.h new file mode 100644 index 00000000..7d119d50 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsub_vv.h @@ -0,0 +1,5 @@ +// vsub +VI_VV_LOOP +({ + vd = vs2 - vs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vsub_vx.h new file mode 100644 index 00000000..e075b423 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsub_vx.h @@ -0,0 +1,5 @@ +// vsub: vd[i] = (vd[i] * x[rs1]) - vs2[i] +VI_VX_LOOP +({ + vd = vs2 - rs1; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h new file mode 100644 index 00000000..f5549187 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei16_v.h @@ -0,0 +1,2 @@ +// vsuxe16.v +VI_ST_INDEX(e16, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h new file mode 100644 index 00000000..783bbade --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei32_v.h @@ -0,0 +1,2 @@ +// vsuxe32.v +VI_ST_INDEX(e32, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h new file mode 100644 index 00000000..9e6018b6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei64_v.h @@ -0,0 +1,2 @@ +// vsuxe64.v +VI_ST_INDEX(e64, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h b/vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h new file mode 100644 index 00000000..322dc35e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vsuxei8_v.h @@ -0,0 +1,2 @@ +// vsuxe8.v +VI_ST_INDEX(e8, true); diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h new file mode 100644 index 00000000..df4a1353 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_vv.h @@ -0,0 +1,6 @@ +// vwadd.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h new file mode 100644 index 00000000..c2263893 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_vx.h @@ -0,0 +1,6 @@ +// vwadd.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, +, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h 
b/vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h new file mode 100644 index 00000000..54d2ba40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_wv.h @@ -0,0 +1,6 @@ +// vwadd.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h new file mode 100644 index 00000000..bb4cee51 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwadd_wx.h @@ -0,0 +1,6 @@ +// vwaddu.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h new file mode 100644 index 00000000..286ebc85 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vv.h @@ -0,0 +1,6 @@ +// vwaddu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, +, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h new file mode 100644 index 00000000..61cddfc8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_vx.h @@ -0,0 +1,6 @@ +// vwaddu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, +, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h new file mode 100644 index 00000000..fee81365 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wv.h @@ -0,0 +1,6 @@ +// vwaddu.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h new file mode 100644 index 00000000..0073ac35 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwaddu_wx.h @@ -0,0 +1,6 @@ +// vwaddu.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h new file mode 100644 index 00000000..7208c6d6 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vv.h @@ -0,0 +1,6 @@ +// vwmacc.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, vd_w, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h new file mode 100644 index 00000000..5ae597a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmacc_vx.h @@ -0,0 +1,6 @@ +// vwmacc.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, vd_w, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h new file mode 100644 index 00000000..3aa43ef4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vv.h @@ -0,0 +1,6 @@ +// vwmaccsu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, vs1, vd_w, *, +, int, uint, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h new file mode 100644 index 00000000..e00a21dd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccsu_vx.h @@ -0,0 +1,6 @@ +// vwmaccsu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, rs1, vd_w, *, +, int, uint, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h 
b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h new file mode 100644 index 00000000..2cbdaa31 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vv.h @@ -0,0 +1,6 @@ +// vwmaccu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, vd_w, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h new file mode 100644 index 00000000..533297f3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccu_vx.h @@ -0,0 +1,6 @@ +// vwmaccu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, vd_w, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h new file mode 100644 index 00000000..5310f0e9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmaccus_vx.h @@ -0,0 +1,6 @@ +// vwmaccus.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, rs1, vd_w, *, +, int, int, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h new file mode 100644 index 00000000..2197edbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmul_vv.h @@ -0,0 +1,6 @@ +// vwmul.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h new file mode 100644 index 00000000..bc1422d4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmul_vx.h @@ -0,0 +1,6 @@ +// vwmul.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, *, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h new file mode 100644 index 00000000..ec373771 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vv.h @@ -0,0 +1,6 @@ +// vwmulsu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, vs1, 0, *, +, uint, int, uint) +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h new file mode 100644 index 00000000..d58ecce0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulsu_vx.h @@ -0,0 +1,6 @@ +// vwmulsu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN_MIX(vs2, rs1, 0, *, +, uint, int, uint) +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h new file mode 100644 index 00000000..8ddbb4b4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vv.h @@ -0,0 +1,6 @@ +// vwmulu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h new file mode 100644 index 00000000..1ce77eef --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwmulu_vx.h @@ -0,0 +1,6 @@ +// vwmul.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, *, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h b/vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h new file mode 100644 index 00000000..c7a87db4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwredsum_vs.h @@ -0,0 +1,5 @@ +// vwredsum.vs vd, vs2, vs1 +VI_VV_LOOP_WIDE_REDUCTION +({ + vd_0_res += vs2; +}) diff --git 
a/vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h b/vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h new file mode 100644 index 00000000..889a77d3 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwredsumu_vs.h @@ -0,0 +1,5 @@ +// vwredsum.vs vd, vs2, vs1 +VI_VV_ULOOP_WIDE_REDUCTION +({ + vd_0_res += vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h new file mode 100644 index 00000000..99f93489 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_vv.h @@ -0,0 +1,6 @@ +// vwsub.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, -, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h new file mode 100644 index 00000000..affdf62c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_vx.h @@ -0,0 +1,6 @@ +// vwsub.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, -, +, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h new file mode 100644 index 00000000..10db7308 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_wv.h @@ -0,0 +1,6 @@ +// vwsub.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, -, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h new file mode 100644 index 00000000..f72341ba --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsub_wx.h @@ -0,0 +1,6 @@ +// vwsub.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, -, int); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h new file mode 100644 index 00000000..cf68adb9 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vv.h @@ -0,0 +1,6 @@ +// vwsubu.vv vd, vs2, vs1 +VI_CHECK_DSS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, vs1, 0, -, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h new file mode 100644 index 00000000..3e972dd2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_vx.h @@ -0,0 +1,6 @@ +// vwsubu.vx vd, vs2, rs1 +VI_CHECK_DSS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_OP_AND_ASSIGN(vs2, rs1, 0, -, +, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h new file mode 100644 index 00000000..3687c3d2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wv.h @@ -0,0 +1,6 @@ +// vwsubu.wv vd, vs2, vs1 +VI_CHECK_DDS(true); +VI_VV_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(vs1, -, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h new file mode 100644 index 00000000..c7f20edd --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vwsubu_wx.h @@ -0,0 +1,6 @@ +// vwsubu.wx vd, vs2, rs1 +VI_CHECK_DDS(false); +VI_VX_LOOP_WIDEN +({ + VI_WIDE_WVX_OP(rs1, -, uint); +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vxor_vi.h b/vendor/riscv-isa-sim/riscv/insns/vxor_vi.h new file mode 100644 index 00000000..b2dcf946 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vxor_vi.h @@ -0,0 +1,5 @@ +// vxor +VI_VI_LOOP +({ + vd = simm5 ^ vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vxor_vv.h b/vendor/riscv-isa-sim/riscv/insns/vxor_vv.h new file mode 100644 index 00000000..c37b6ab7 --- /dev/null +++ 
b/vendor/riscv-isa-sim/riscv/insns/vxor_vv.h @@ -0,0 +1,5 @@ +// vxor +VI_VV_LOOP +({ + vd = vs1 ^ vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vxor_vx.h b/vendor/riscv-isa-sim/riscv/insns/vxor_vx.h new file mode 100644 index 00000000..8021e0e8 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vxor_vx.h @@ -0,0 +1,5 @@ +// vxor +VI_VX_LOOP +({ + vd = rs1 ^ vs2; +}) diff --git a/vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h b/vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h new file mode 100644 index 00000000..100f2e35 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vzext_vf2.h @@ -0,0 +1 @@ +VI_VV_EXT(2, uint); diff --git a/vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h b/vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h new file mode 100644 index 00000000..6ff920e0 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vzext_vf4.h @@ -0,0 +1 @@ +VI_VV_EXT(4, uint); diff --git a/vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h b/vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h new file mode 100644 index 00000000..b1762fbf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/vzext_vf8.h @@ -0,0 +1 @@ +VI_VV_EXT(8, uint); diff --git a/vendor/riscv-isa-sim/riscv/insns/wfi.h b/vendor/riscv-isa-sim/riscv/insns/wfi.h new file mode 100644 index 00000000..299cb01f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/wfi.h @@ -0,0 +1,11 @@ +if (STATE.v && STATE.prv == PRV_U) { + require_novirt(); +} else if (get_field(STATE.mstatus->read(), MSTATUS_TW)) { + require_privilege(PRV_M); +} else if (STATE.v) { // VS-mode + if (get_field(STATE.hstatus->read(), HSTATUS_VTW)) + require_novirt(); +} else { + require_privilege(PRV_S); +} +wfi(); diff --git a/vendor/riscv-isa-sim/riscv/insns/xnor.h b/vendor/riscv-isa-sim/riscv/insns/xnor.h new file mode 100644 index 00000000..ccf1c9f7 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xnor.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBB, EXT_ZBKB); +WRITE_RD(RS1 ^ ~RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/xor.h b/vendor/riscv-isa-sim/riscv/insns/xor.h new file mode 100644 index 00000000..771efa7f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xor.h @@ -0,0 +1 @@ +WRITE_RD(RS1 ^ RS2); diff --git a/vendor/riscv-isa-sim/riscv/insns/xori.h b/vendor/riscv-isa-sim/riscv/insns/xori.h new file mode 100644 index 00000000..33ce6307 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xori.h @@ -0,0 +1 @@ +WRITE_RD(insn.i_imm() ^ RS1); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm16.h b/vendor/riscv-isa-sim/riscv/insns/xperm16.h new file mode 100644 index 00000000..6b0ad51f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm16.h @@ -0,0 +1,2 @@ +require_extension(EXT_XZBP); +WRITE_RD(sext_xlen(xperm(RS1, RS2, 4, xlen))); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm32.h b/vendor/riscv-isa-sim/riscv/insns/xperm32.h new file mode 100644 index 00000000..64d90a40 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm32.h @@ -0,0 +1,3 @@ +require_rv64; +require_extension(EXT_XZBP); +WRITE_RD(xperm(RS1, RS2, 5, xlen)); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm4.h b/vendor/riscv-isa-sim/riscv/insns/xperm4.h new file mode 100644 index 00000000..38800f3b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm4.h @@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBKX, EXT_XZBP); +WRITE_RD(sext_xlen(xperm(RS1, RS2, 2, xlen))); diff --git a/vendor/riscv-isa-sim/riscv/insns/xperm8.h b/vendor/riscv-isa-sim/riscv/insns/xperm8.h new file mode 100644 index 00000000..c272d669 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/xperm8.h 
@@ -0,0 +1,2 @@ +require_either_extension(EXT_ZBKX, EXT_XZBP); +WRITE_RD(sext_xlen(xperm(RS1, RS2, 3, xlen))); diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd810.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd810.h new file mode 100644 index 00000000..88434dee --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd810.h @@ -0,0 +1 @@ +P_ZUNPKD8(1, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd820.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd820.h new file mode 100644 index 00000000..f2065081 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd820.h @@ -0,0 +1 @@ +P_ZUNPKD8(2, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd830.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd830.h new file mode 100644 index 00000000..13655149 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd830.h @@ -0,0 +1 @@ +P_ZUNPKD8(3, 0) diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd831.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd831.h new file mode 100644 index 00000000..8febe77f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd831.h @@ -0,0 +1 @@ +P_ZUNPKD8(3, 1) diff --git a/vendor/riscv-isa-sim/riscv/insns/zunpkd832.h b/vendor/riscv-isa-sim/riscv/insns/zunpkd832.h new file mode 100644 index 00000000..f14030bc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/insns/zunpkd832.h @@ -0,0 +1 @@ +P_ZUNPKD8(3, 2) diff --git a/vendor/riscv-isa-sim/riscv/interactive.cc b/vendor/riscv-isa-sim/riscv/interactive.cc new file mode 100644 index 00000000..88eb86b1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/interactive.cc @@ -0,0 +1,579 @@ +// See LICENSE for license details. + +#include "sim.h" +#include "decode.h" +#include "disasm.h" +#include "mmu.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_CMD_STR 40 // maximum possible size of a command line + +#define STR_(X) #X // these definitions allow to use a macro as a string +#define STR(X) STR_(X) + +DECLARE_TRAP(-1, interactive) + +processor_t *sim_t::get_core(const std::string& i) +{ + char *ptr; + unsigned long p = strtoul(i.c_str(), &ptr, 10); + if (*ptr || p >= procs.size()) + throw trap_interactive(); + return get_core(p); +} + +static std::string readline(int fd) +{ + struct termios tios; + bool noncanonical = tcgetattr(fd, &tios) == 0 && (tios.c_lflag & ICANON) == 0; + + std::string s; + for (char ch; read(fd, &ch, 1) == 1; ) + { + if (ch == '\x7f') + { + if (s.empty()) + continue; + s.erase(s.end()-1); + + if (noncanonical && write(fd, "\b \b", 3) != 3) {} + } + else if (noncanonical && write(fd, &ch, 1) != 1) {} + + if (ch == '\n') + break; + if (ch != '\x7f') + s += ch; + } + return s; +} + +#ifdef HAVE_BOOST_ASIO +// read input command string +std::string sim_t::rin(boost::asio::streambuf *bout_ptr) { + std::string s; + if (acceptor_ptr) { // if we are listening, get commands from socket + try { + socket_ptr.reset(new boost::asio::ip::tcp::socket(*io_service_ptr)); + acceptor_ptr->accept(*socket_ptr); // wait for someone to open connection + boost::asio::streambuf buf; + boost::asio::read_until(*socket_ptr, buf, "\n"); // wait for command + s = boost::asio::buffer_cast(buf.data()); + boost::erase_all(s, "\r"); // get rid off any cr and lf + boost::erase_all(s, "\n"); + // The socket client is a web server and it appends the IP of the computer + // that sent the command from its web browser. + + // For now, erase the IP if it is there. 
+ boost::regex re(" ((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}" + "(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])$"); + s = boost::regex_replace(s, re, (std::string)""); + + // TODO: check the IP against the IP used to upload RISC-V source files + } catch (std::exception& e) { + std::cerr << e.what() << std::endl; + } + // output goes to socket + sout_.rdbuf(bout_ptr); + } else { // if we are not listening on a socket, get commands from terminal + std::cerr << ": " << std::flush; + s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin + // output goes to stderr + sout_.rdbuf(std::cerr.rdbuf()); + } + return s; +} + +// write sout_ to socket (via bout) +void sim_t::wout(boost::asio::streambuf *bout_ptr) { + if (!cmd_file && acceptor_ptr) { // only if we are not getting command inputs from a file + // and if a socket has been created + try { + boost::system::error_code ignored_error; + boost::asio::write(*socket_ptr, *bout_ptr, boost::asio::transfer_all(), ignored_error); + socket_ptr->close(); // close the socket after each command input/ouput + // This is need to in order to make the socket interface + // acessible by HTTP GET via a socket client in a web server. + } catch (std::exception& e) { + std::cerr << e.what() << std::endl; + } + } +} +#endif + +void sim_t::interactive() +{ + typedef void (sim_t::*interactive_func)(const std::string&, const std::vector&); + std::map funcs; + + funcs["run"] = &sim_t::interactive_run_noisy; + funcs["r"] = funcs["run"]; + funcs["rs"] = &sim_t::interactive_run_silent; + funcs["vreg"] = &sim_t::interactive_vreg; + funcs["reg"] = &sim_t::interactive_reg; + funcs["freg"] = &sim_t::interactive_freg; + funcs["fregh"] = &sim_t::interactive_fregh; + funcs["fregs"] = &sim_t::interactive_fregs; + funcs["fregd"] = &sim_t::interactive_fregd; + funcs["pc"] = &sim_t::interactive_pc; + funcs["mem"] = &sim_t::interactive_mem; + funcs["str"] = &sim_t::interactive_str; + funcs["until"] = &sim_t::interactive_until_silent; + funcs["untiln"] = &sim_t::interactive_until_noisy; + funcs["while"] = &sim_t::interactive_until_silent; + funcs["quit"] = &sim_t::interactive_quit; + funcs["q"] = funcs["quit"]; + funcs["help"] = &sim_t::interactive_help; + funcs["h"] = funcs["help"]; + + while (!done()) + { +#ifdef HAVE_BOOST_ASIO + boost::asio::streambuf bout; // socket output +#endif + std::string s; + char cmd_str[MAX_CMD_STR+1]; // only used for following fscanf + // first get commands from file, if cmd_file has been set + if (cmd_file && !feof(cmd_file) && fscanf(cmd_file,"%" STR(MAX_CMD_STR) "[^\n]\n", cmd_str)==1) { + // up to MAX_CMD_STR characters before \n, skipping \n + s = cmd_str; + // while we get input from file, output goes to stderr + sout_.rdbuf(std::cerr.rdbuf()); + } else { + // when there are no commands left from file or if there was no file from the beginning + cmd_file = NULL; // mark file pointer as being not valid, so any method can test this easily +#ifdef HAVE_BOOST_ASIO + s = rin(&bout); // get command string from socket or terminal +#else + std::cerr << ": " << std::flush; + s = readline(2); // 2 is stderr, but when doing reads it reverts to stdin +#endif + } + + std::stringstream ss(s); + std::string cmd, tmp; + std::vector args; + + if (!(ss >> cmd)) + { + set_procs_debug(true); + step(1); +#ifdef HAVE_BOOST_ASIO + wout(&bout); // socket output, if required +#endif + continue; + } + + while (ss >> tmp) + args.push_back(tmp); + + std::ostream out(sout_.rdbuf()); + + try + { + if (funcs.count(cmd)) + (this->*funcs[cmd])(cmd, args); + 
else + out << "Unknown command " << cmd << std::endl; + } catch(trap_t& t) { + out << "Bad or missing arguments for command " << cmd << std::endl; + } +#ifdef HAVE_BOOST_ASIO + wout(&bout); // socket output, if required +#endif + } + ctrlc_pressed = false; +} + +void sim_t::interactive_help(const std::string& cmd, const std::vector& args) +{ + std::ostream out(sout_.rdbuf()); + out << + "Interactive commands:\n" + "reg [reg] # Display [reg] (all if omitted) in \n" + "freg # Display float in as hex\n" + "fregh # Display half precision in \n" + "fregs # Display single precision in \n" + "fregd # Display double precision in \n" + "vreg [reg] # Display vector [reg] (all if omitted) in \n" + "pc # Show current PC in \n" + "mem # Show contents of physical memory\n" + "str # Show NUL-terminated C string at in core \n" + "until reg # Stop when in hits \n" + "until pc # Stop when PC in hits \n" + "untiln pc # Run noisy and stop when PC in hits \n" + "until mem # Stop when memory becomes \n" + "while reg # Run while in is \n" + "while pc # Run while PC in is \n" + "while mem # Run while memory is \n" + "run [count] # Resume noisy execution (until CTRL+C, or [count] insns)\n" + "r [count] Alias for run\n" + "rs [count] # Resume silent execution (until CTRL+C, or [count] insns)\n" + "quit # End the simulation\n" + "q Alias for quit\n" + "help # This screen!\n" + "h Alias for help\n" + "Note: Hitting enter is the same as: run 1" + << std::endl; +} + +void sim_t::interactive_run_noisy(const std::string& cmd, const std::vector& args) +{ + interactive_run(cmd,args,true); +} + +void sim_t::interactive_run_silent(const std::string& cmd, const std::vector& args) +{ + interactive_run(cmd,args,false); +} + +void sim_t::interactive_run(const std::string& cmd, const std::vector& args, bool noisy) +{ + size_t steps = args.size() ? 
atoll(args[0].c_str()) : -1; + ctrlc_pressed = false; + set_procs_debug(noisy); + for (size_t i = 0; i < steps && !ctrlc_pressed && !done(); i++) + step(1); + + std::ostream out(sout_.rdbuf()); + if (!noisy) out << ":" << std::endl; +} + +void sim_t::interactive_quit(const std::string& cmd, const std::vector& args) +{ + exit(0); +} + +reg_t sim_t::get_pc(const std::vector& args) +{ + if (args.size() != 1) + throw trap_interactive(); + + processor_t *p = get_core(args[0]); + return p->get_state()->pc; +} + +void sim_t::interactive_pc(const std::string& cmd, const std::vector& args) +{ + if(args.size() != 1) + throw trap_interactive(); + + processor_t *p = get_core(args[0]); + int max_xlen = p->get_isa().get_max_xlen(); + + std::ostream out(sout_.rdbuf()); + out << std::hex << std::setfill('0') << "0x" << std::setw(max_xlen/4) + << zext(get_pc(args), max_xlen) << std::endl; +} + +reg_t sim_t::get_reg(const std::vector& args) +{ + if (args.size() != 2) + throw trap_interactive(); + + processor_t *p = get_core(args[0]); + + unsigned long r = std::find(xpr_name, xpr_name + NXPR, args[1]) - xpr_name; + if (r == NXPR) { + char *ptr; + r = strtoul(args[1].c_str(), &ptr, 10); + if (*ptr) { + #define DECLARE_CSR(name, number) if (args[1] == #name) return p->get_csr(number); + #include "encoding.h" // generates if's for all csrs + r = NXPR; // else case (csr name not found) + #undef DECLARE_CSR + } + } + + if (r >= NXPR) + throw trap_interactive(); + + return p->get_state()->XPR[r]; +} + +freg_t sim_t::get_freg(const std::vector& args) +{ + if(args.size() != 2) + throw trap_interactive(); + + processor_t *p = get_core(args[0]); + int r = std::find(fpr_name, fpr_name + NFPR, args[1]) - fpr_name; + if (r == NFPR) + r = atoi(args[1].c_str()); + if (r >= NFPR) + throw trap_interactive(); + + return p->get_state()->FPR[r]; +} + +void sim_t::interactive_vreg(const std::string& cmd, const std::vector& args) +{ + if (args.size() < 1) + throw trap_interactive(); + + int rstart = 0; + int rend = NVPR; + if (args.size() >= 2) { + rstart = strtol(args[1].c_str(), NULL, 0); + if (!(rstart >= 0 && rstart < NVPR)) { + rstart = 0; + } else { + rend = rstart + 1; + } + } + + // Show all the regs! 
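The interactive() loop earlier in this file dispatches debugger commands through a map from command name to pointer-to-member-function. A stripped-down standalone sketch of that dispatch pattern follows (editor's illustration, not part of the vendored Spike source; the command names and the argument handling here are invented for the example):

// Editor's illustrative sketch only; not part of the vendored Spike source.
#include <iostream>
#include <map>
#include <string>
#include <vector>

class shell {
public:
  void run() {
    typedef void (shell::*handler)(const std::vector<std::string>&);
    std::map<std::string, handler> funcs;
    funcs["help"] = &shell::cmd_help;
    funcs["quit"] = &shell::cmd_quit;

    std::string line;
    while (!done_ && std::getline(std::cin, line)) {
      // Take the first word as the command; real argument parsing is elided.
      std::string cmd = line.substr(0, line.find(' '));
      std::vector<std::string> args;
      if (funcs.count(cmd))
        (this->*funcs[cmd])(args);   // call through the pointer-to-member
      else
        std::cout << "Unknown command " << cmd << std::endl;
    }
  }

private:
  bool done_ = false;
  void cmd_help(const std::vector<std::string>&) { std::cout << "commands: help quit\n"; }
  void cmd_quit(const std::vector<std::string>&) { done_ = true; }
};

int main() { shell().run(); return 0; }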
+ processor_t *p = get_core(args[0]); + const int vlen = (int)(p->VU.get_vlen()) >> 3; + const int elen = (int)(p->VU.get_elen()) >> 3; + const int num_elem = vlen/elen; + + std::ostream out(sout_.rdbuf()); + out << std::dec << "VLEN=" << (vlen << 3) << " bits; ELEN=" << (elen << 3) << " bits" << std::endl; + + for (int r = rstart; r < rend; ++r) { + out << std::setfill (' ') << std::left << std::setw(4) << vr_name[r] << std::right << ": "; + for (int e = num_elem-1; e >= 0; --e){ + uint64_t val; + switch(elen){ + case 8: + val = p->VU.elt(r, e); + out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill ('0') << std::setw(16) << val << " "; + break; + case 4: + val = p->VU.elt(r, e); + out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill ('0') << std::setw(8) << (uint32_t)val << " "; + break; + case 2: + val = p->VU.elt(r, e); + out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill ('0') << std::setw(8) << (uint16_t)val << " "; + break; + case 1: + val = p->VU.elt(r, e); + out << std::dec << "[" << e << "]: 0x" << std::hex << std::setfill ('0') << std::setw(8) << (int)(uint8_t)val << " "; + break; + } + } + out << std::endl; + } +} + + +void sim_t::interactive_reg(const std::string& cmd, const std::vector& args) +{ + if (args.size() < 1) + throw trap_interactive(); + + processor_t *p = get_core(args[0]); + int max_xlen = p->get_isa().get_max_xlen(); + + std::ostream out(sout_.rdbuf()); + out << std::hex; + + if (args.size() == 1) { + // Show all the regs! + + for (int r = 0; r < NXPR; ++r) { + out << std::setfill(' ') << std::setw(4) << xpr_name[r] + << ": 0x" << std::setfill('0') << std::setw(max_xlen/4) + << zext(p->get_state()->XPR[r], max_xlen); + if ((r + 1) % 4 == 0) + out << std::endl; + } + } else { + out << "0x" << std::setfill('0') << std::setw(max_xlen/4) + << zext(get_reg(args), max_xlen) << std::endl; + } +} + +union fpr +{ + freg_t r; + float s; + double d; +}; + +void sim_t::interactive_freg(const std::string& cmd, const std::vector& args) +{ + freg_t r = get_freg(args); + + std::ostream out(sout_.rdbuf()); + out << std::hex << "0x" << std::setfill ('0') << std::setw(16) << r.v[1] << std::setw(16) << r.v[0] << std::endl; +} + +void sim_t::interactive_fregh(const std::string& cmd, const std::vector& args) +{ + fpr f; + f.r = freg(f16_to_f32(f16(get_freg(args)))); + + std::ostream out(sout_.rdbuf()); + out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl; +} + +void sim_t::interactive_fregs(const std::string& cmd, const std::vector& args) +{ + fpr f; + f.r = get_freg(args); + + std::ostream out(sout_.rdbuf()); + out << (isBoxedF32(f.r) ? (double)f.s : NAN) << std::endl; +} + +void sim_t::interactive_fregd(const std::string& cmd, const std::vector& args) +{ + fpr f; + f.r = get_freg(args); + + std::ostream out(sout_.rdbuf()); + out << (isBoxedF64(f.r) ? 
f.d : NAN) << std::endl; +} + +reg_t sim_t::get_mem(const std::vector& args) +{ + if (args.size() != 1 && args.size() != 2) + throw trap_interactive(); + + std::string addr_str = args[0]; + mmu_t* mmu = debug_mmu; + if (args.size() == 2) + { + processor_t *p = get_core(args[0]); + mmu = p->get_mmu(); + addr_str = args[1]; + } + + reg_t addr = strtol(addr_str.c_str(),NULL,16), val; + if (addr == LONG_MAX) + addr = strtoul(addr_str.c_str(),NULL,16); + + switch(addr % 8) + { + case 0: + val = mmu->load_uint64(addr); + break; + case 4: + val = mmu->load_uint32(addr); + break; + case 2: + case 6: + val = mmu->load_uint16(addr); + break; + default: + val = mmu->load_uint8(addr); + break; + } + return val; +} + +void sim_t::interactive_mem(const std::string& cmd, const std::vector& args) +{ + int max_xlen = procs[0]->get_isa().get_max_xlen(); + + std::ostream out(sout_.rdbuf()); + out << std::hex << "0x" << std::setfill('0') << std::setw(max_xlen/4) + << zext(get_mem(args), max_xlen) << std::endl; +} + +void sim_t::interactive_str(const std::string& cmd, const std::vector& args) +{ + if (args.size() != 1 && args.size() != 2) + throw trap_interactive(); + + std::string addr_str = args[0]; + mmu_t* mmu = debug_mmu; + if (args.size() == 2) + { + processor_t *p = get_core(args[0]); + mmu = p->get_mmu(); + addr_str = args[1]; + } + + reg_t addr = strtol(addr_str.c_str(),NULL,16); + + std::ostream out(sout_.rdbuf()); + + char ch; + while((ch = mmu->load_uint8(addr++))) + out << ch; + + out << std::endl; +} + +void sim_t::interactive_until_silent(const std::string& cmd, const std::vector& args) +{ + interactive_until(cmd, args, false); +} + +void sim_t::interactive_until_noisy(const std::string& cmd, const std::vector& args) +{ + interactive_until(cmd, args, true); +} + +void sim_t::interactive_until(const std::string& cmd, const std::vector& args, bool noisy) +{ + bool cmd_until = cmd == "until" || cmd == "untiln"; + + if (args.size() < 3) + throw trap_interactive(); + + if (args.size() == 3) + get_core(args[1]); // make sure that argument is a valid core number + + char *end; + reg_t val = strtol(args[args.size()-1].c_str(),&end,16); + if (val == LONG_MAX) + val = strtoul(args[args.size()-1].c_str(),&end,16); + if (args[args.size()-1].c_str() == end) // not a valid number + throw trap_interactive(); + + // mask bits above max_xlen + int max_xlen = procs[strtol(args[1].c_str(),NULL,10)]->get_isa().get_max_xlen(); + if (max_xlen == 32) val &= 0xFFFFFFFF; + + std::vector args2; + args2 = std::vector(args.begin()+1,args.end()-1); + + auto func = args[0] == "reg" ? &sim_t::get_reg : + args[0] == "pc" ? &sim_t::get_pc : + args[0] == "mem" ? 
&sim_t::get_mem : + NULL; + + if (func == NULL) + throw trap_interactive(); + + ctrlc_pressed = false; + + while (1) + { + try + { + reg_t current = (this->*func)(args2); + + // mask bits above max_xlen + if (max_xlen == 32) current &= 0xFFFFFFFF; + + if (cmd_until == (current == val)) + break; + if (ctrlc_pressed) + break; + } + catch (trap_t& t) {} + + set_procs_debug(noisy); + step(1); + } +} diff --git a/vendor/riscv-isa-sim/riscv/isa_parser.cc b/vendor/riscv-isa-sim/riscv/isa_parser.cc new file mode 100644 index 00000000..0adec2cc --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/isa_parser.cc @@ -0,0 +1,247 @@ +#include "isa_parser.h" +#include "extension.h" + +static std::string strtolower(const char* str) +{ + std::string res; + for (const char *r = str; *r; r++) + res += std::tolower(*r); + return res; +} + +static void bad_option_string(const char *option, const char *value, + const char *msg) +{ + fprintf(stderr, "error: bad %s option '%s'. %s\n", option, value, msg); + abort(); +} + +static void bad_isa_string(const char* isa, const char* msg) +{ + bad_option_string("--isa", isa, msg); +} + +static void bad_priv_string(const char* priv) +{ + fprintf(stderr, "error: bad --priv option %s\n", priv); + abort(); +} + +isa_parser_t::isa_parser_t(const char* str, const char *priv) + : extension_table(256, false) +{ + isa_string = strtolower(str); + const char* all_subsets = "mafdqchpv"; + + max_isa = reg_t(2) << 62; + // enable zicntr and zihpm unconditionally for backward compatibility + extension_table[EXT_ZICNTR] = true; + extension_table[EXT_ZIHPM] = true; + + if (isa_string.compare(0, 4, "rv32") == 0) + max_xlen = 32, max_isa = reg_t(1) << 30; + else if (isa_string.compare(0, 4, "rv64") == 0) + max_xlen = 64; + else + bad_isa_string(str, "ISA strings must begin with RV32 or RV64"); + + switch (isa_string[4]) { + case 'g': + // G = IMAFD_Zicsr_Zifencei, but Spike includes the latter two + // unconditionally, so they need not be explicitly added here. 
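The isa_parser_t constructor above lower-cases the ISA string, requires an rv32/rv64 prefix, and rewrites a leading 'g' as the "imafd" base before walking the remaining single-letter extensions. A standalone sketch of just those first steps follows (editor's illustration, not part of the vendored Spike source; version suffixes, underscores and the Z/S/X extension names are handled later in the real parser):

// Editor's illustrative sketch only; not part of the vendored Spike source.
#include <cassert>
#include <cctype>
#include <stdexcept>
#include <string>

// Lower-case the string, check the rv32/rv64 prefix, expand 'g' to "imafd".
static std::string canonicalize_isa(std::string isa)
{
  for (auto &c : isa)
    c = std::tolower(static_cast<unsigned char>(c));
  if (isa.compare(0, 4, "rv32") != 0 && isa.compare(0, 4, "rv64") != 0)
    throw std::invalid_argument("ISA strings must begin with RV32 or RV64");
  if (isa.size() > 4 && isa[4] == 'g')
    isa = isa.substr(0, 4) + "imafd" + isa.substr(5);
  return isa;
}

int main()
{
  assert(canonicalize_isa("RV64GC") == "rv64imafdc");
  assert(canonicalize_isa("rv32imac") == "rv32imac");
  return 0;
}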
+ isa_string = isa_string.substr(0, 4) + "imafd" + isa_string.substr(5); + // Fall through + case 'i': + max_isa |= 1L << ('i' - 'a'); + break; + + case 'e': + max_isa |= 1L << ('e' - 'a'); + break; + + default: + bad_isa_string(str, ("'" + isa_string.substr(0, 4) + "' must be followed by I, E, or G").c_str()); + } + + const char* isa_str = isa_string.c_str(); + auto p = isa_str, subset = all_subsets; + for (p += 5; islower(*p) && !strchr("zsx", *p); ++p) { + while (*subset && (*p != *subset)) + ++subset; + + if (!*subset) { + if (strchr(all_subsets, *p)) + bad_isa_string(str, ("Extension '" + std::string(1, *p) + "' appears too late in ISA string").c_str()); + else + bad_isa_string(str, ("Unsupported extension '" + std::string(1, *p) + "'").c_str()); + } + + switch (*p) { + case 'p': extension_table[EXT_ZBPBO] = true; + extension_table[EXT_ZPN] = true; + extension_table[EXT_ZPSFOPERAND] = true; + extension_table[EXT_ZMMUL] = true; break; + case 'v': // even rv32iv implies double float + case 'q': max_isa |= 1L << ('d' - 'a'); + // Fall through + case 'd': max_isa |= 1L << ('f' - 'a'); + } + max_isa |= 1L << (*p - 'a'); + extension_table[toupper(*p)] = true; + while (isdigit(*(p + 1))) { + ++p; // skip major version, point, and minor version if presented + if (*(p + 1) == 'p') ++p; + } + p += *(p + 1) == '_'; // underscores may be used to improve readability + } + + while (islower(*p) || (*p == '_')) { + p += *p == '_'; // first underscore is optional + auto end = p; + do ++end; while (*end && *end != '_'); + auto ext_str = std::string(p, end); + if (ext_str == "zfh" || ext_str == "zfhmin") { + if (!((max_isa >> ('f' - 'a')) & 1)) + bad_isa_string(str, ("'" + ext_str + "' extension requires 'F'").c_str()); + extension_table[EXT_ZFHMIN] = true; + if (ext_str == "zfh") + extension_table[EXT_ZFH] = true; + } else if (ext_str == "zicsr") { + // Spike necessarily has Zicsr, because + // Zicsr is implied by the privileged architecture + } else if (ext_str == "zifencei") { + // For compatibility with version 2.0 of the base ISAs, we + // unconditionally include FENCE.I, so Zifencei adds nothing more. + } else if (ext_str == "zihintpause") { + // HINTs encoded in base-ISA instructions are always present. 
+ } else if (ext_str == "zmmul") { + extension_table[EXT_ZMMUL] = true; + } else if (ext_str == "zba") { + extension_table[EXT_ZBA] = true; + } else if (ext_str == "zbb") { + extension_table[EXT_ZBB] = true; + } else if (ext_str == "zbc") { + extension_table[EXT_ZBC] = true; + } else if (ext_str == "zbs") { + extension_table[EXT_ZBS] = true; + } else if (ext_str == "zbkb") { + extension_table[EXT_ZBKB] = true; + } else if (ext_str == "zbkc") { + extension_table[EXT_ZBKC] = true; + } else if (ext_str == "zbkx") { + extension_table[EXT_ZBKX] = true; + } else if (ext_str == "zk") { + extension_table[EXT_ZBKB] = true; + extension_table[EXT_ZBKC] = true; + extension_table[EXT_ZBKX] = true; + extension_table[EXT_ZKND] = true; + extension_table[EXT_ZKNE] = true; + extension_table[EXT_ZKNH] = true; + extension_table[EXT_ZKR] = true; + } else if (ext_str == "zkn") { + extension_table[EXT_ZBKB] = true; + extension_table[EXT_ZBKC] = true; + extension_table[EXT_ZBKX] = true; + extension_table[EXT_ZKND] = true; + extension_table[EXT_ZKNE] = true; + extension_table[EXT_ZKNH] = true; + } else if (ext_str == "zknd") { + extension_table[EXT_ZKND] = true; + } else if (ext_str == "zkne") { + extension_table[EXT_ZKNE] = true; + } else if (ext_str == "zknh") { + extension_table[EXT_ZKNH] = true; + } else if (ext_str == "zks") { + extension_table[EXT_ZBKB] = true; + extension_table[EXT_ZBKC] = true; + extension_table[EXT_ZBKX] = true; + extension_table[EXT_ZKSED] = true; + extension_table[EXT_ZKSH] = true; + } else if (ext_str == "zksed") { + extension_table[EXT_ZKSED] = true; + } else if (ext_str == "zksh") { + extension_table[EXT_ZKSH] = true; + } else if (ext_str == "zkr") { + extension_table[EXT_ZKR] = true; + } else if (ext_str == "zkt") { + } else if (ext_str == "svnapot") { + extension_table[EXT_SVNAPOT] = true; + } else if (ext_str == "svpbmt") { + extension_table[EXT_SVPBMT] = true; + } else if (ext_str == "svinval") { + extension_table[EXT_SVINVAL] = true; + } else if (ext_str == "zicbom") { + extension_table[EXT_ZICBOM] = true; + } else if (ext_str == "zicboz") { + extension_table[EXT_ZICBOZ] = true; + } else if (ext_str == "zicbop") { + } else if (ext_str == "zicntr") { + } else if (ext_str == "zihpm") { + } else if (ext_str[0] == 'x') { + max_isa |= 1L << ('x' - 'a'); + extension_table[toupper('x')] = true; + if (ext_str == "xbitmanip") { + extension_table[EXT_XZBP] = true; + extension_table[EXT_XZBS] = true; + extension_table[EXT_XZBE] = true; + extension_table[EXT_XZBF] = true; + extension_table[EXT_XZBC] = true; + extension_table[EXT_XZBM] = true; + extension_table[EXT_XZBR] = true; + extension_table[EXT_XZBT] = true; + } else if (ext_str == "xzbp") { + extension_table[EXT_XZBP] = true; + } else if (ext_str == "xzbs") { + extension_table[EXT_XZBS] = true; + } else if (ext_str == "xzbe") { + extension_table[EXT_XZBE] = true; + } else if (ext_str == "xzbf") { + extension_table[EXT_XZBF] = true; + } else if (ext_str == "xzbc") { + extension_table[EXT_XZBC] = true; + } else if (ext_str == "xzbm") { + extension_table[EXT_XZBM] = true; + } else if (ext_str == "xzbr") { + extension_table[EXT_XZBR] = true; + } else if (ext_str == "xzbt") { + extension_table[EXT_XZBT] = true; + } else if (ext_str.size() == 1) { + bad_isa_string(str, "single 'X' is not a proper name"); + } else if (ext_str != "xdummy") { + extension_t* x = find_extension(ext_str.substr(1).c_str())(); + if (!extensions.insert(std::make_pair(x->name(), x)).second) { + fprintf(stderr, "extensions must have unique names (got two named 
\"%s\"!)\n", x->name()); + abort(); + } + } + } else { + bad_isa_string(str, ("unsupported extension: " + ext_str).c_str()); + } + p = end; + } + if (*p) { + bad_isa_string(str, ("can't parse: " + std::string(p)).c_str()); + } + + std::string lowercase = strtolower(priv); + bool user = false, supervisor = false; + + if (lowercase == "m") + ; + else if (lowercase == "mu") + user = true; + else if (lowercase == "msu") + user = supervisor = true; + else + bad_priv_string(priv); + + if (user) { + max_isa |= reg_t(user) << ('u' - 'a'); + extension_table['U'] = true; + } + + if (supervisor) { + max_isa |= reg_t(supervisor) << ('s' - 'a'); + extension_table['S'] = true; + } +} diff --git a/vendor/riscv-isa-sim/riscv/isa_parser.h b/vendor/riscv-isa-sim/riscv/isa_parser.h new file mode 100644 index 00000000..3cefe12d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/isa_parser.h @@ -0,0 +1,90 @@ +// See LICENSE for license details. +#ifndef _RISCV_ISA_PARSER_H +#define _RISCV_ISA_PARSER_H + +#include "decode.h" + +#include +#include +#include + +class extension_t; + +typedef enum { + // 65('A') ~ 90('Z') is reserved for standard isa in misa + EXT_ZFH, + EXT_ZFHMIN, + EXT_ZBA, + EXT_ZBB, + EXT_ZBC, + EXT_ZBS, + EXT_ZBKB, + EXT_ZBKC, + EXT_ZBKX, + EXT_ZKND, + EXT_ZKNE, + EXT_ZKNH, + EXT_ZKSED, + EXT_ZKSH, + EXT_ZKR, + EXT_ZMMUL, + EXT_ZBPBO, + EXT_ZPN, + EXT_ZPSFOPERAND, + EXT_SVNAPOT, + EXT_SVPBMT, + EXT_SVINVAL, + EXT_ZDINX, + EXT_ZFINX, + EXT_ZHINX, + EXT_ZHINXMIN, + EXT_ZICBOM, + EXT_ZICBOZ, + EXT_ZICNTR, + EXT_ZIHPM, + EXT_XZBP, + EXT_XZBS, + EXT_XZBE, + EXT_XZBF, + EXT_XZBC, + EXT_XZBM, + EXT_XZBR, + EXT_XZBT, +} isa_extension_t; + +typedef enum { + IMPL_MMU_SV32, + IMPL_MMU_SV39, + IMPL_MMU_SV48, + IMPL_MMU_SV57, + IMPL_MMU_SBARE, + IMPL_MMU, + IMPL_MMU_VMID, + IMPL_MMU_ASID, +} impl_extension_t; + +class isa_parser_t { +public: + isa_parser_t(const char* str, const char *priv); + ~isa_parser_t(){}; + unsigned get_max_xlen() const { return max_xlen; } + reg_t get_max_isa() const { return max_isa; } + std::string get_isa_string() const { return isa_string; } + bool extension_enabled(unsigned char ext) const { + if (ext >= 'A' && ext <= 'Z') + return (max_isa >> (ext - 'A')) & 1; + else + return extension_table[ext]; + } + const std::unordered_map & + get_extensions() const { return extensions; } + +protected: + unsigned max_xlen; + reg_t max_isa; + std::vector extension_table; + std::string isa_string; + std::unordered_map extensions; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/jtag_dtm.cc b/vendor/riscv-isa-sim/riscv/jtag_dtm.cc new file mode 100644 index 00000000..9ca38afb --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/jtag_dtm.cc @@ -0,0 +1,204 @@ +#include + +#include "decode.h" +#include "jtag_dtm.h" +#include "debug_module.h" +#include "debug_defines.h" + +#if 0 +# define D(x) x +#else +# define D(x) +#endif + +enum { + IR_IDCODE=1, + IR_DTMCONTROL=0x10, + IR_DBUS=0x11, + IR_BYPASS=0x1f +}; + +#define DTMCONTROL_VERSION 0xf +#define DTMCONTROL_ABITS (0x3f << 4) +#define DTMCONTROL_DMISTAT (3<<10) +#define DTMCONTROL_IDLE (7<<12) +#define DTMCONTROL_DMIRESET (1<<16) +#define DTMCONTROL_DMIHARDRESET (1<<17) + +#define DMI_OP 3 +#define DMI_DATA (0xffffffffLL<<2) +#define DMI_ADDRESS ((1LL<<(abits+34)) - (1LL<<34)) + +#define DMI_OP_STATUS_SUCCESS 0 +#define DMI_OP_STATUS_RESERVED 1 +#define DMI_OP_STATUS_FAILED 2 +#define DMI_OP_STATUS_BUSY 3 + +#define DMI_OP_NOP 0 +#define DMI_OP_READ 1 +#define DMI_OP_WRITE 2 +#define DMI_OP_RESERVED 3 + +jtag_dtm_t::jtag_dtm_t(debug_module_t *dm, 
unsigned required_rti_cycles) : + dm(dm), required_rti_cycles(required_rti_cycles), + _tck(false), _tms(false), _tdi(false), _tdo(false), + dtmcontrol((abits << DTM_DTMCS_ABITS_OFFSET) | 1), + dmi(DMI_OP_STATUS_SUCCESS << DTM_DMI_OP_OFFSET), + bypass(0), + _state(TEST_LOGIC_RESET) +{ +} + +void jtag_dtm_t::reset() { + _state = TEST_LOGIC_RESET; + busy_stuck = false; + rti_remaining = 0; + dmi = 0; +} + +void jtag_dtm_t::set_pins(bool tck, bool tms, bool tdi) { + const jtag_state_t next[16][2] = { + /* TEST_LOGIC_RESET */ { RUN_TEST_IDLE, TEST_LOGIC_RESET }, + /* RUN_TEST_IDLE */ { RUN_TEST_IDLE, SELECT_DR_SCAN }, + /* SELECT_DR_SCAN */ { CAPTURE_DR, SELECT_IR_SCAN }, + /* CAPTURE_DR */ { SHIFT_DR, EXIT1_DR }, + /* SHIFT_DR */ { SHIFT_DR, EXIT1_DR }, + /* EXIT1_DR */ { PAUSE_DR, UPDATE_DR }, + /* PAUSE_DR */ { PAUSE_DR, EXIT2_DR }, + /* EXIT2_DR */ { SHIFT_DR, UPDATE_DR }, + /* UPDATE_DR */ { RUN_TEST_IDLE, SELECT_DR_SCAN }, + /* SELECT_IR_SCAN */ { CAPTURE_IR, TEST_LOGIC_RESET }, + /* CAPTURE_IR */ { SHIFT_IR, EXIT1_IR }, + /* SHIFT_IR */ { SHIFT_IR, EXIT1_IR }, + /* EXIT1_IR */ { PAUSE_IR, UPDATE_IR }, + /* PAUSE_IR */ { PAUSE_IR, EXIT2_IR }, + /* EXIT2_IR */ { SHIFT_IR, UPDATE_IR }, + /* UPDATE_IR */ { RUN_TEST_IDLE, SELECT_DR_SCAN } + }; + + if (!_tck && tck) { + // Positive clock edge. TMS and TDI are sampled on the rising edge of TCK by + // Target. + switch (_state) { + case SHIFT_DR: + dr >>= 1; + dr |= (uint64_t) _tdi << (dr_length-1); + break; + case SHIFT_IR: + ir >>= 1; + ir |= _tdi << (ir_length-1); + break; + default: + break; + } + _state = next[_state][_tms]; + + } else { + // Negative clock edge. TDO is updated. + switch (_state) { + case RUN_TEST_IDLE: + if (rti_remaining > 0) + rti_remaining--; + dm->run_test_idle(); + break; + case TEST_LOGIC_RESET: + ir = IR_IDCODE; + break; + case CAPTURE_DR: + capture_dr(); + break; + case SHIFT_DR: + _tdo = dr & 1; + break; + case UPDATE_DR: + update_dr(); + break; + case SHIFT_IR: + _tdo = ir & 1; + break; + default: + break; + } + } + + D(fprintf(stderr, "state=%2d, tdi=%d, tdo=%d, tms=%d, tck=%d, ir=0x%02x, " + "dr=0x%lx\n", + _state, _tdi, _tdo, _tms, _tck, ir, dr)); + + _tck = tck; + _tms = tms; + _tdi = tdi; +} + +void jtag_dtm_t::capture_dr() +{ + switch (ir) { + case IR_IDCODE: + dr = idcode; + dr_length = 32; + break; + case IR_DTMCONTROL: + dr = dtmcontrol; + dr_length = 32; + break; + case IR_DBUS: + if (rti_remaining > 0 || busy_stuck) { + dr = DMI_OP_STATUS_BUSY; + busy_stuck = true; + } else { + dr = dmi; + } + dr_length = abits + 34; + break; + case IR_BYPASS: + dr = bypass; + dr_length = 1; + break; + default: + fprintf(stderr, "Unsupported IR: 0x%x\n", ir); + break; + } + D(fprintf(stderr, "Capture DR; IR=0x%x, DR=0x%lx (%d bits)\n", + ir, dr, dr_length)); +} + +void jtag_dtm_t::update_dr() +{ + D(fprintf(stderr, "Update DR; IR=0x%x, DR=0x%lx (%d bits)\n", + ir, dr, dr_length)); + if (ir == IR_DTMCONTROL) { + if (dr & DTMCONTROL_DMIRESET) + busy_stuck = false; + if (dr & DTMCONTROL_DMIHARDRESET) + reset(); + } else if (ir == IR_BYPASS) { + bypass = dr; + } else if (ir == IR_DBUS && !busy_stuck) { + unsigned op = get_field(dr, DMI_OP); + uint32_t data = get_field(dr, DMI_DATA); + unsigned address = get_field(dr, DMI_ADDRESS); + + dmi = dr; + + bool success = true; + if (op == DMI_OP_READ) { + uint32_t value; + if (dm->dmi_read(address, &value)) { + dmi = set_field(dmi, DMI_DATA, value); + } else { + success = false; + } + } else if (op == DMI_OP_WRITE) { + success = dm->dmi_write(address, data); + } + + if (success) { 
+ dmi = set_field(dmi, DMI_OP, DMI_OP_STATUS_SUCCESS); + } else { + dmi = set_field(dmi, DMI_OP, DMI_OP_STATUS_FAILED); + } + D(fprintf(stderr, "dmi=0x%lx\n", dmi)); + + rti_remaining = required_rti_cycles; + } +} diff --git a/vendor/riscv-isa-sim/riscv/jtag_dtm.h b/vendor/riscv-isa-sim/riscv/jtag_dtm.h new file mode 100644 index 00000000..23a54be1 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/jtag_dtm.h @@ -0,0 +1,69 @@ +#ifndef JTAG_DTM_H +#define JTAG_DTM_H + +#include + +class debug_module_t; + +typedef enum { + TEST_LOGIC_RESET, + RUN_TEST_IDLE, + SELECT_DR_SCAN, + CAPTURE_DR, + SHIFT_DR, + EXIT1_DR, + PAUSE_DR, + EXIT2_DR, + UPDATE_DR, + SELECT_IR_SCAN, + CAPTURE_IR, + SHIFT_IR, + EXIT1_IR, + PAUSE_IR, + EXIT2_IR, + UPDATE_IR +} jtag_state_t; + +class jtag_dtm_t +{ + static const unsigned idcode = 0xdeadbeef; + + public: + jtag_dtm_t(debug_module_t *dm, unsigned required_rti_cycles); + void reset(); + + void set_pins(bool tck, bool tms, bool tdi); + + bool tdo() const { return _tdo; } + + jtag_state_t state() const { return _state; } + + private: + debug_module_t *dm; + // The number of Run-Test/Idle cycles required before a DMI access is + // complete. + unsigned required_rti_cycles; + bool _tck, _tms, _tdi, _tdo; + uint32_t ir; + const unsigned ir_length = 5; + uint64_t dr; + unsigned dr_length; + + // abits must come before dtmcontrol so it can easily be used in the + // constructor. + const unsigned abits = 6; + uint32_t dtmcontrol; + uint64_t dmi; + unsigned bypass; + // Number of Run-Test/Idle cycles needed before we call this access + // complete. + unsigned rti_remaining; + bool busy_stuck; + + jtag_state_t _state; + + void capture_dr(); + void update_dr(); +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/log_file.h b/vendor/riscv-isa-sim/riscv/log_file.h new file mode 100644 index 00000000..d039859d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/log_file.h @@ -0,0 +1,37 @@ +// See LICENSE for license details. +#ifndef _RISCV_LOGFILE_H +#define _RISCV_LOGFILE_H + +#include +#include +#include +#include + +// Header-only class wrapping a log file. When constructed with an +// actual path, it opens the named file for writing. When constructed +// with the null path, it wraps stderr. +class log_file_t +{ +public: + log_file_t(const char *path) + : wrapped_file (nullptr, &fclose) + { + if (!path) + return; + + wrapped_file.reset(fopen(path, "w")); + if (! wrapped_file) { + std::ostringstream oss; + oss << "Failed to open log file at `" << path << "': " + << strerror (errno); + throw std::runtime_error(oss.str()); + } + } + + FILE *get() { return wrapped_file ? wrapped_file.get() : stderr; } + +private: + std::unique_ptr wrapped_file; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/memtracer.h b/vendor/riscv-isa-sim/riscv/memtracer.h new file mode 100644 index 00000000..72bb3a88 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/memtracer.h @@ -0,0 +1,56 @@ +// See LICENSE for license details. 
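+//
+// memtracer_t is an abstract observer for physical memory traffic: on a
+// slow-path access the MMU asks interested_in_range(); interesting accesses
+// are reported through trace() (and never cached in the TLB), and cache
+// maintenance is reported through clean_invalidate(). A minimal sketch of a
+// tracer (hypothetical, for illustration only):
+//
+//   class printing_tracer_t : public memtracer_t {
+//     bool interested_in_range(uint64_t, uint64_t, access_type) { return true; }
+//     void trace(uint64_t addr, size_t bytes, access_type type) {
+//       fprintf(stderr, "access type=%d addr=0x%" PRIx64 " len=%zu\n",
+//               (int)type, addr, bytes);
+//     }
+//     void clean_invalidate(uint64_t, size_t, bool, bool) {}
+//   };
+//
+// Tracers are attached with mmu_t::register_memtracer(), which hooks them into
+// the memtracer_list_t defined below.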
+ +#ifndef _MEMTRACER_H +#define _MEMTRACER_H + +#include +#include +#include + +enum access_type { + LOAD, + STORE, + FETCH, +}; + +class memtracer_t +{ + public: + memtracer_t() {} + virtual ~memtracer_t() {} + + virtual bool interested_in_range(uint64_t begin, uint64_t end, access_type type) = 0; + virtual void trace(uint64_t addr, size_t bytes, access_type type) = 0; + virtual void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval) = 0; +}; + +class memtracer_list_t : public memtracer_t +{ + public: + bool empty() { return list.empty(); } + bool interested_in_range(uint64_t begin, uint64_t end, access_type type) + { + for (auto it: list) + if (it->interested_in_range(begin, end, type)) + return true; + return false; + } + void trace(uint64_t addr, size_t bytes, access_type type) + { + for (auto it: list) + it->trace(addr, bytes, type); + } + void clean_invalidate(uint64_t addr, size_t bytes, bool clean, bool inval) + { + for (auto it: list) + it->clean_invalidate(addr, bytes, clean, inval); + } + void hook(memtracer_t* h) + { + list.push_back(h); + } + private: + std::vector list; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/mmio_plugin.h b/vendor/riscv-isa-sim/riscv/mmio_plugin.h new file mode 100644 index 00000000..f14470bf --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/mmio_plugin.h @@ -0,0 +1,91 @@ +#ifndef _RISCV_MMIO_PLUGIN_H +#define _RISCV_MMIO_PLUGIN_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" +{ +#endif + +typedef uint64_t reg_t; + +typedef struct { + // Allocate user data for an instance of the plugin. The parameter is a simple + // c-string containing arguments used to construct the plugin. It returns a + // void* to the allocated data. + void* (*alloc)(const char*); + + // Load a memory address of the MMIO plugin. The parameters are the user_data + // (void*), memory offset (reg_t), number of bytes to load (size_t), and the + // buffer into which the loaded data should be written (uint8_t*). Return true + // if the load is successful and false otherwise. + bool (*load)(void*, reg_t, size_t, uint8_t*); + + // Store some bytes to a memory address of the MMIO plugin. The parameters are + // the user_data (void*), memory offset (reg_t), number of bytes to store + // (size_t), and the buffer containing the data to be stored (const uint8_t*). + // Return true if the store is successful and false otherwise. + bool (*store)(void*, reg_t, size_t, const uint8_t*); + + // Deallocate the data allocated during the call to alloc. The parameter is a + // pointer to the user data allocated during the call to alloc. + void (*dealloc)(void*); +} mmio_plugin_t; + +// Register an mmio plugin with the application. This should be called by +// plugins as part of their loading process. +extern void register_mmio_plugin(const char* name_cstr, + const mmio_plugin_t* mmio_plugin); + +#ifdef __cplusplus +} + +#include + +// Wrapper around the C plugin API that makes registering a C++ class with +// correctly formed constructor, load, and store functions easier. The template +// type should be the type that implements the MMIO plugin interface. Simply +// make a global mmio_plugin_registration_t and your plugin should register +// itself with the application when it is loaded because the +// mmio_plugin_registration_t constructor will be called. 
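+//
+// A minimal sketch of such a plugin (hypothetical names, for illustration
+// only): the wrapped type needs a constructor taking the argument string plus
+// load() and store() members with the shapes used by the thunks below.
+//
+//   struct dummy_mmio_t {
+//     dummy_mmio_t(const std::string& args) {}
+//     bool load(reg_t addr, size_t len, uint8_t* bytes) {
+//       for (size_t i = 0; i < len; i++) bytes[i] = 0;  // reads return zero
+//       return true;
+//     }
+//     bool store(reg_t addr, size_t len, const uint8_t* bytes) { return true; }
+//   };
+//   static mmio_plugin_registration_t<dummy_mmio_t> dummy_reg("dummy_mmio");
+//
+// The global's constructor runs when the plugin library is loaded, so it calls
+// register_mmio_plugin() before simulation begins.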
+template +struct mmio_plugin_registration_t +{ + static void* alloc(const char* args) + { + return reinterpret_cast(new T(std::string(args))); + } + + static bool load(void* self, reg_t addr, size_t len, uint8_t* bytes) + { + return reinterpret_cast(self)->load(addr, len, bytes); + } + + static bool store(void* self, reg_t addr, size_t len, const uint8_t* bytes) + { + return reinterpret_cast(self)->store(addr, len, bytes); + } + + static void dealloc(void* self) + { + delete reinterpret_cast(self); + } + + mmio_plugin_registration_t(const std::string& name) + { + mmio_plugin_t plugin = { + mmio_plugin_registration_t::alloc, + mmio_plugin_registration_t::load, + mmio_plugin_registration_t::store, + mmio_plugin_registration_t::dealloc, + }; + + register_mmio_plugin(name.c_str(), &plugin); + } +}; +#endif // __cplusplus + +#endif diff --git a/vendor/riscv-isa-sim/riscv/mmu.cc b/vendor/riscv-isa-sim/riscv/mmu.cc new file mode 100644 index 00000000..db787a80 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/mmu.cc @@ -0,0 +1,447 @@ +// See LICENSE for license details. + +#include "mmu.h" +#include "arith.h" +#include "simif.h" +#include "processor.h" + +mmu_t::mmu_t(simif_t* sim, processor_t* proc) + : sim(sim), proc(proc), +#ifdef RISCV_ENABLE_DUAL_ENDIAN + target_big_endian(false), +#endif + check_triggers_fetch(false), + check_triggers_load(false), + check_triggers_store(false), + matched_trigger(NULL) +{ + flush_tlb(); + yield_load_reservation(); +} + +mmu_t::~mmu_t() +{ +} + +void mmu_t::flush_icache() +{ + for (size_t i = 0; i < ICACHE_ENTRIES; i++) + icache[i].tag = -1; +} + +void mmu_t::flush_tlb() +{ + memset(tlb_insn_tag, -1, sizeof(tlb_insn_tag)); + memset(tlb_load_tag, -1, sizeof(tlb_load_tag)); + memset(tlb_store_tag, -1, sizeof(tlb_store_tag)); + + flush_icache(); +} + +static void throw_access_exception(bool virt, reg_t addr, access_type type) +{ + switch (type) { + case FETCH: throw trap_instruction_access_fault(virt, addr, 0, 0); + case LOAD: throw trap_load_access_fault(virt, addr, 0, 0); + case STORE: throw trap_store_access_fault(virt, addr, 0, 0); + default: abort(); + } +} + +reg_t mmu_t::translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags) +{ + if (!proc) + return addr; + + bool virt = proc->state.v; + bool hlvx = xlate_flags & RISCV_XLATE_VIRT_HLVX; + reg_t mode = proc->state.prv; + if (type != FETCH) { + if (!proc->state.debug_mode && get_field(proc->state.mstatus->read(), MSTATUS_MPRV)) { + mode = get_field(proc->state.mstatus->read(), MSTATUS_MPP); + if (get_field(proc->state.mstatus->read(), MSTATUS_MPV) && mode != PRV_M) + virt = true; + } + if (xlate_flags & RISCV_XLATE_VIRT) { + virt = true; + mode = get_field(proc->state.hstatus->read(), HSTATUS_SPVP); + } + } + + reg_t paddr = walk(addr, type, mode, virt, hlvx) | (addr & (PGSIZE-1)); + if (!pmp_ok(paddr, len, type, mode)) + throw_access_exception(virt, addr, type); + return paddr; +} + +tlb_entry_t mmu_t::fetch_slow_path(reg_t vaddr) +{ + reg_t paddr = translate(vaddr, sizeof(fetch_temp), FETCH, 0); + + if (auto host_addr = sim->addr_to_mem(paddr)) { + return refill_tlb(vaddr, paddr, host_addr, FETCH); + } else { + if (!mmio_load(paddr, sizeof fetch_temp, (uint8_t*)&fetch_temp)) + throw trap_instruction_access_fault(proc->state.v, vaddr, 0, 0); + tlb_entry_t entry = {(char*)&fetch_temp - vaddr, paddr - vaddr}; + return entry; + } +} + +reg_t reg_from_bytes(size_t len, const uint8_t* bytes) +{ + switch (len) { + case 1: + return bytes[0]; + case 2: + return bytes[0] | + (((reg_t) bytes[1]) << 
8); + case 4: + return bytes[0] | + (((reg_t) bytes[1]) << 8) | + (((reg_t) bytes[2]) << 16) | + (((reg_t) bytes[3]) << 24); + case 8: + return bytes[0] | + (((reg_t) bytes[1]) << 8) | + (((reg_t) bytes[2]) << 16) | + (((reg_t) bytes[3]) << 24) | + (((reg_t) bytes[4]) << 32) | + (((reg_t) bytes[5]) << 40) | + (((reg_t) bytes[6]) << 48) | + (((reg_t) bytes[7]) << 56); + } + abort(); +} + +bool mmu_t::mmio_ok(reg_t addr, access_type type) +{ + // Disallow access to debug region when not in debug mode + if (addr >= DEBUG_START && addr <= DEBUG_END && proc && !proc->state.debug_mode) + return false; + + return true; +} + +bool mmu_t::mmio_load(reg_t addr, size_t len, uint8_t* bytes) +{ + if (!mmio_ok(addr, LOAD)) + return false; + + return sim->mmio_load(addr, len, bytes); +} + +bool mmu_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) +{ + if (!mmio_ok(addr, STORE)) + return false; + + return sim->mmio_store(addr, len, bytes); +} + +void mmu_t::load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags) +{ + reg_t paddr = translate(addr, len, LOAD, xlate_flags); + + if (auto host_addr = sim->addr_to_mem(paddr)) { + memcpy(bytes, host_addr, len); + if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) + tracer.trace(paddr, len, LOAD); + else if (xlate_flags == 0) + refill_tlb(addr, paddr, host_addr, LOAD); + } else if (!mmio_load(paddr, len, bytes)) { + throw trap_load_access_fault((proc) ? proc->state.v : false, addr, 0, 0); + } + + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); + if (matched_trigger) + throw *matched_trigger; + } +} + +void mmu_t::store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store) +{ + reg_t paddr = translate(addr, len, STORE, xlate_flags); + + if (!matched_trigger) { + reg_t data = reg_from_bytes(len, bytes); + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, data); + if (matched_trigger) + throw *matched_trigger; + } + + if (actually_store) { + if (auto host_addr = sim->addr_to_mem(paddr)) { + memcpy(host_addr, bytes, len); + if (tracer.interested_in_range(paddr, paddr + PGSIZE, STORE)) + tracer.trace(paddr, len, STORE); + else if (xlate_flags == 0) + refill_tlb(addr, paddr, host_addr, STORE); + } else if (!mmio_store(paddr, len, bytes)) { + throw trap_store_access_fault((proc) ? 
proc->state.v : false, addr, 0, 0); + } + } +} + +tlb_entry_t mmu_t::refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type) +{ + reg_t idx = (vaddr >> PGSHIFT) % TLB_ENTRIES; + reg_t expected_tag = vaddr >> PGSHIFT; + + tlb_entry_t entry = {host_addr - vaddr, paddr - vaddr}; + + if (proc && get_field(proc->state.mstatus->read(), MSTATUS_MPRV)) + return entry; + + if ((tlb_load_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag) + tlb_load_tag[idx] = -1; + if ((tlb_store_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag) + tlb_store_tag[idx] = -1; + if ((tlb_insn_tag[idx] & ~TLB_CHECK_TRIGGERS) != expected_tag) + tlb_insn_tag[idx] = -1; + + if ((check_triggers_fetch && type == FETCH) || + (check_triggers_load && type == LOAD) || + (check_triggers_store && type == STORE)) + expected_tag |= TLB_CHECK_TRIGGERS; + + if (pmp_homogeneous(paddr & ~reg_t(PGSIZE - 1), PGSIZE)) { + if (type == FETCH) tlb_insn_tag[idx] = expected_tag; + else if (type == STORE) tlb_store_tag[idx] = expected_tag; + else tlb_load_tag[idx] = expected_tag; + } + + tlb_data[idx] = entry; + return entry; +} + +bool mmu_t::pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode) +{ + if (!proc || proc->n_pmp == 0) + return true; + + bool mseccfg_mml = proc->state.mseccfg->get_mml(); + bool mseccfg_mmwp = proc->state.mseccfg->get_mmwp(); + + for (size_t i = 0; i < proc->n_pmp; i++) { + // Check each 4-byte sector of the access + bool any_match = false; + bool all_match = true; + for (reg_t offset = 0; offset < len; offset += 1 << PMP_SHIFT) { + reg_t cur_addr = addr + offset; + bool match = proc->state.pmpaddr[i]->match4(cur_addr); + any_match |= match; + all_match &= match; + } + + if (any_match) { + // If the PMP matches only a strict subset of the access, fail it + if (!all_match) + return false; + + return proc->state.pmpaddr[i]->access_ok(type, mode); + } + } + return ((mode == PRV_M) && !mseccfg_mmwp && (!mseccfg_mml || ((type == LOAD) || (type == STORE)))); +} + +reg_t mmu_t::pmp_homogeneous(reg_t addr, reg_t len) +{ + if ((addr | len) & (len - 1)) + abort(); + + if (!proc) + return true; + + for (size_t i = 0; i < proc->n_pmp; i++) + if (proc->state.pmpaddr[i]->subset_match(addr, len)) + return false; + + return true; +} + +reg_t mmu_t::s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_type, bool virt, bool hlvx) +{ + if (!virt) + return gpa; + + vm_info vm = decode_vm_info(proc->get_const_xlen(), true, 0, proc->get_state()->hgatp->read()); + if (vm.levels == 0) + return gpa; + + int maxgpabits = vm.levels * vm.idxbits + vm.widenbits + PGSHIFT; + reg_t maxgpa = (1ULL << maxgpabits) - 1; + + bool mxr = proc->state.sstatus->readvirt(false) & MSTATUS_MXR; + + reg_t base = vm.ptbase; + if ((gpa & ~maxgpa) == 0) { + for (int i = vm.levels - 1; i >= 0; i--) { + int ptshift = i * vm.idxbits; + int idxbits = (i == (vm.levels - 1)) ? vm.idxbits + vm.widenbits : vm.idxbits; + reg_t idx = (gpa >> (PGSHIFT + ptshift)) & ((reg_t(1) << idxbits) - 1); + + // check that physical address of PTE is legal + auto pte_paddr = base + idx * vm.ptesize; + auto ppte = sim->addr_to_mem(pte_paddr); + if (!ppte || !pmp_ok(pte_paddr, vm.ptesize, LOAD, PRV_S)) { + throw_access_exception(virt, gva, trap_type); + } + + reg_t pte = vm.ptesize == 4 ? 
from_target(*(target_endian*)ppte) : from_target(*(target_endian*)ppte); + reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT; + + if (pte & PTE_RSVD) { + break; + } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) { + break; + } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) { + break; + } else if (PTE_TABLE(pte)) { // next level of page table + if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) + break; + base = ppn << PGSHIFT; + } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) { + break; + } else if (!(pte & PTE_U)) { + break; + } else if (type == FETCH || hlvx ? !(pte & PTE_X) : + type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) : + !((pte & PTE_R) && (pte & PTE_W))) { + break; + } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) { + break; + } else { + reg_t ad = PTE_A | ((type == STORE) * PTE_D); +#ifdef RISCV_ENABLE_DIRTY + // set accessed and possibly dirty bits. + if ((pte & ad) != ad) { + if (!pmp_ok(pte_paddr, vm.ptesize, STORE, PRV_S)) + throw_access_exception(virt, gva, trap_type); + *(target_endian*)ppte |= to_target((uint32_t)ad); + } +#else + // take exception if access or possibly dirty bit is not set. + if ((pte & ad) != ad) + break; +#endif + reg_t vpn = gpa >> PGSHIFT; + reg_t page_mask = (reg_t(1) << PGSHIFT) - 1; + + int napot_bits = ((pte & PTE_N) ? (ctz(ppn) + 1) : 0); + if (((pte & PTE_N) && (ppn == 0 || i != 0)) || (napot_bits != 0 && napot_bits != 4)) + break; + + reg_t page_base = ((ppn & ~((reg_t(1) << napot_bits) - 1)) + | (vpn & ((reg_t(1) << napot_bits) - 1)) + | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT; + return page_base | (gpa & page_mask); + } + } + } + + switch (trap_type) { + case FETCH: throw trap_instruction_guest_page_fault(gva, gpa >> 2, 0); + case LOAD: throw trap_load_guest_page_fault(gva, gpa >> 2, 0); + case STORE: throw trap_store_guest_page_fault(gva, gpa >> 2, 0); + default: abort(); + } +} + +reg_t mmu_t::walk(reg_t addr, access_type type, reg_t mode, bool virt, bool hlvx) +{ + reg_t page_mask = (reg_t(1) << PGSHIFT) - 1; + reg_t satp = proc->get_state()->satp->readvirt(virt); + vm_info vm = decode_vm_info(proc->get_const_xlen(), false, mode, satp); + if (vm.levels == 0) + return s2xlate(addr, addr & ((reg_t(2) << (proc->xlen-1))-1), type, type, virt, hlvx) & ~page_mask; // zero-extend from xlen + + bool s_mode = mode == PRV_S; + bool sum = proc->state.sstatus->readvirt(virt) & MSTATUS_SUM; + bool mxr = (proc->state.sstatus->readvirt(false) | proc->state.sstatus->readvirt(virt)) & MSTATUS_MXR; + + // verify bits xlen-1:va_bits-1 are all equal + int va_bits = PGSHIFT + vm.levels * vm.idxbits; + reg_t mask = (reg_t(1) << (proc->xlen - (va_bits-1))) - 1; + reg_t masked_msbs = (addr >> (va_bits-1)) & mask; + if (masked_msbs != 0 && masked_msbs != mask) + vm.levels = 0; + + reg_t base = vm.ptbase; + for (int i = vm.levels - 1; i >= 0; i--) { + int ptshift = i * vm.idxbits; + reg_t idx = (addr >> (PGSHIFT + ptshift)) & ((1 << vm.idxbits) - 1); + + // check that physical address of PTE is legal + auto pte_paddr = s2xlate(addr, base + idx * vm.ptesize, LOAD, type, virt, false); + auto ppte = sim->addr_to_mem(pte_paddr); + if (!ppte || !pmp_ok(pte_paddr, vm.ptesize, LOAD, PRV_S)) + throw_access_exception(virt, addr, type); + + reg_t pte = vm.ptesize == 4 ? 
from_target(*(target_endian*)ppte) : from_target(*(target_endian*)ppte); + reg_t ppn = (pte & ~reg_t(PTE_ATTR)) >> PTE_PPN_SHIFT; + + if (pte & PTE_RSVD) { + break; + } else if (!proc->extension_enabled(EXT_SVNAPOT) && (pte & PTE_N)) { + break; + } else if (!proc->extension_enabled(EXT_SVPBMT) && (pte & PTE_PBMT)) { + break; + } else if (PTE_TABLE(pte)) { // next level of page table + if (pte & (PTE_D | PTE_A | PTE_U | PTE_N | PTE_PBMT)) + break; + base = ppn << PGSHIFT; + } else if ((pte & PTE_U) ? s_mode && (type == FETCH || !sum) : !s_mode) { + break; + } else if (!(pte & PTE_V) || (!(pte & PTE_R) && (pte & PTE_W))) { + break; + } else if (type == FETCH || hlvx ? !(pte & PTE_X) : + type == LOAD ? !(pte & PTE_R) && !(mxr && (pte & PTE_X)) : + !((pte & PTE_R) && (pte & PTE_W))) { + break; + } else if ((ppn & ((reg_t(1) << ptshift) - 1)) != 0) { + break; + } else { + reg_t ad = PTE_A | ((type == STORE) * PTE_D); +#ifdef RISCV_ENABLE_DIRTY + // set accessed and possibly dirty bits. + if ((pte & ad) != ad) { + if (!pmp_ok(pte_paddr, vm.ptesize, STORE, PRV_S)) + throw_access_exception(virt, addr, type); + *(target_endian*)ppte |= to_target((uint32_t)ad); + } +#else + // take exception if access or possibly dirty bit is not set. + if ((pte & ad) != ad) + break; +#endif + // for superpage or Svnapot NAPOT mappings, make a fake leaf PTE for the TLB's benefit. + reg_t vpn = addr >> PGSHIFT; + + int napot_bits = ((pte & PTE_N) ? (ctz(ppn) + 1) : 0); + if (((pte & PTE_N) && (ppn == 0 || i != 0)) || (napot_bits != 0 && napot_bits != 4)) + break; + + reg_t page_base = ((ppn & ~((reg_t(1) << napot_bits) - 1)) + | (vpn & ((reg_t(1) << napot_bits) - 1)) + | (vpn & ((reg_t(1) << ptshift) - 1))) << PGSHIFT; + reg_t phys = page_base | (addr & page_mask); + return s2xlate(addr, phys, type, type, virt, hlvx) & ~page_mask; + } + } + + switch (type) { + case FETCH: throw trap_instruction_page_fault(virt, addr, 0, 0); + case LOAD: throw trap_load_page_fault(virt, addr, 0, 0); + case STORE: throw trap_store_page_fault(virt, addr, 0, 0); + default: abort(); + } +} + +void mmu_t::register_memtracer(memtracer_t* t) +{ + flush_tlb(); + tracer.hook(t); +} diff --git a/vendor/riscv-isa-sim/riscv/mmu.h b/vendor/riscv-isa-sim/riscv/mmu.h new file mode 100644 index 00000000..8964e294 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/mmu.h @@ -0,0 +1,559 @@ +// See LICENSE for license details. + +#ifndef _RISCV_MMU_H +#define _RISCV_MMU_H + +#include "decode.h" +#include "trap.h" +#include "common.h" +#include "config.h" +#include "simif.h" +#include "processor.h" +#include "memtracer.h" +#include "byteorder.h" +#include "triggers.h" +#include +#include + +// virtual memory configuration +#define PGSHIFT 12 +const reg_t PGSIZE = 1 << PGSHIFT; +const reg_t PGMASK = ~(PGSIZE-1); +#define MAX_PADDR_BITS 56 // imposed by Sv39 / Sv48 + +struct insn_fetch_t +{ + insn_func_t func; + insn_t insn; +}; + +struct icache_entry_t { + reg_t tag; + struct icache_entry_t* next; + insn_fetch_t data; +}; + +struct tlb_entry_t { + char* host_offset; + reg_t target_offset; +}; + +// this class implements a processor's port into the virtual memory system. +// an MMU and instruction cache are maintained for simulator performance. 
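+//
+// A load takes one of two paths: the fast path compares the virtual page
+// number against a software TLB tag and, on a hit, reads directly from host
+// memory; otherwise load_slow_path() performs translation, PMP checks, and
+// MMIO dispatch, refilling the TLB when it can. A minimal sketch of the
+// fast-path check (the real code is generated by the load_func macro below):
+//
+//   reg_t vpn = addr >> PGSHIFT;
+//   if (tlb_load_tag[vpn % TLB_ENTRIES] == vpn)   // hit: host pointer cached
+//     return from_target(*(target_endian<uint64_t>*)
+//                        (tlb_data[vpn % TLB_ENTRIES].host_offset + addr));
+//   // miss (or trigger/MMIO page): fall back to load_slow_path()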
+class mmu_t +{ +private: + std::map alloc_cache; + std::vector> addr_tbl; +public: + mmu_t(simif_t* sim, processor_t* proc); + ~mmu_t(); + +#define RISCV_XLATE_VIRT (1U << 0) +#define RISCV_XLATE_VIRT_HLVX (1U << 1) + + inline reg_t misaligned_load(reg_t addr, size_t size, uint32_t xlate_flags) + { +#ifdef RISCV_ENABLE_MISALIGNED + reg_t res = 0; + for (size_t i = 0; i < size; i++) + res += (reg_t)load_uint8(addr + (target_big_endian? size-1-i : i)) << (i * 8); + return res; +#else + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); + throw trap_load_address_misaligned(gva, addr, 0, 0); +#endif + } + + inline void misaligned_store(reg_t addr, reg_t data, size_t size, uint32_t xlate_flags, bool actually_store=true) + { +#ifdef RISCV_ENABLE_MISALIGNED + for (size_t i = 0; i < size; i++) + store_uint8(addr + (target_big_endian? size-1-i : i), data >> (i * 8), actually_store); +#else + bool gva = ((proc) ? proc->state.v : false) || (RISCV_XLATE_VIRT & xlate_flags); + throw trap_store_address_misaligned(gva, addr, 0, 0); +#endif + } + +#ifndef RISCV_ENABLE_COMMITLOG +# define READ_MEM(addr, size) ({}) +#else +# define READ_MEM(addr, size) \ + proc->state.log_mem_read.push_back(std::make_tuple(addr, 0, size)); +#endif + + // template for functions that load an aligned value from memory + #define load_func(type, prefix, xlate_flags) \ + inline type##_t prefix##_##type(reg_t addr, bool require_alignment = false) { \ + if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (require_alignment) load_reserved_address_misaligned(addr); \ + else return misaligned_load(addr, sizeof(type##_t), xlate_flags); \ + } \ + reg_t vpn = addr >> PGSHIFT; \ + size_t size = sizeof(type##_t); \ + if ((xlate_flags) == 0 && likely(tlb_load_tag[vpn % TLB_ENTRIES] == vpn)) { \ + if (proc) READ_MEM(addr, size); \ + return from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ + } \ + if ((xlate_flags) == 0 && unlikely(tlb_load_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ + type##_t data = from_target(*(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr)); \ + if (!matched_trigger) { \ + matched_trigger = trigger_exception(triggers::OPERATION_LOAD, addr, data); \ + if (matched_trigger) \ + throw *matched_trigger; \ + } \ + if (proc) READ_MEM(addr, size); \ + return data; \ + } \ + target_endian res; \ + load_slow_path(addr, sizeof(type##_t), (uint8_t*)&res, (xlate_flags)); \ + if (proc) READ_MEM(addr, size); \ + return from_target(res); \ + } + + // load value from memory at aligned address; zero extend to register width + load_func(uint8, load, 0) + load_func(uint16, load, 0) + load_func(uint32, load, 0) + load_func(uint64, load, 0) + + // load value from guest memory at aligned address; zero extend to register width + load_func(uint8, guest_load, RISCV_XLATE_VIRT) + load_func(uint16, guest_load, RISCV_XLATE_VIRT) + load_func(uint32, guest_load, RISCV_XLATE_VIRT) + load_func(uint64, guest_load, RISCV_XLATE_VIRT) + load_func(uint16, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) + load_func(uint32, guest_load_x, RISCV_XLATE_VIRT|RISCV_XLATE_VIRT_HLVX) + + // load value from memory at aligned address; sign extend to register width + load_func(int8, load, 0) + load_func(int16, load, 0) + load_func(int32, load, 0) + load_func(int64, load, 0) + + // load value from guest memory at aligned address; sign extend to register width + load_func(int8, guest_load, RISCV_XLATE_VIRT) + load_func(int16, guest_load, RISCV_XLATE_VIRT) + 
load_func(int32, guest_load, RISCV_XLATE_VIRT) + load_func(int64, guest_load, RISCV_XLATE_VIRT) + +#ifndef RISCV_ENABLE_COMMITLOG +# define WRITE_MEM(addr, value, size) ({}) +#else +# define WRITE_MEM(addr, val, size) \ + proc->state.log_mem_write.push_back(std::make_tuple(addr, val, size)); +#endif + + // template for functions that store an aligned value to memory + #define store_func(type, prefix, xlate_flags) \ + void prefix##_##type(reg_t addr, type##_t val, bool actually_store=true, bool require_alignment=false) { \ + if (unlikely(addr & (sizeof(type##_t)-1))) { \ + if (require_alignment) store_conditional_address_misaligned(addr); \ + else return misaligned_store(addr, val, sizeof(type##_t), xlate_flags, actually_store); \ + } \ + reg_t vpn = addr >> PGSHIFT; \ + size_t size = sizeof(type##_t); \ + if ((xlate_flags) == 0 && likely(tlb_store_tag[vpn % TLB_ENTRIES] == vpn)) { \ + if (actually_store) { \ + if (proc) WRITE_MEM(addr, val, size); \ + *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \ + } \ + } \ + else if ((xlate_flags) == 0 && unlikely(tlb_store_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { \ + if (actually_store) { \ + if (!matched_trigger) { \ + matched_trigger = trigger_exception(triggers::OPERATION_STORE, addr, val); \ + if (matched_trigger) \ + throw *matched_trigger; \ + } \ + if (proc) WRITE_MEM(addr, val, size); \ + *(target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr) = to_target(val); \ + } \ + } \ + else { \ + target_endian target_val = to_target(val); \ + store_slow_path(addr, sizeof(type##_t), (const uint8_t*)&target_val, (xlate_flags), actually_store); \ + if (actually_store && proc) WRITE_MEM(addr, val, size); \ + } \ + } + + // AMO/Zicbom faults should be reported as store faults + #define convert_load_traps_to_store_traps(BODY) \ + try { \ + BODY \ + } catch (trap_load_address_misaligned& t) { \ + /* Misaligned fault will not be triggered by Zicbom */ \ + throw trap_store_address_misaligned(t.has_gva(), t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } catch (trap_load_page_fault& t) { \ + throw trap_store_page_fault(t.has_gva(), t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } catch (trap_load_access_fault& t) { \ + throw trap_store_access_fault(t.has_gva(), t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } catch (trap_load_guest_page_fault& t) { \ + throw trap_store_guest_page_fault(t.get_tval(), t.get_tval2(), t.get_tinst()); \ + } + + // template for functions that perform an atomic memory operation + #define amo_func(type) \ + template \ + type##_t amo_##type(reg_t addr, op f) { \ + convert_load_traps_to_store_traps({ \ + store_##type(addr, 0, false, true); \ + auto lhs = load_##type(addr, true); \ + store_##type(addr, f(lhs)); \ + return lhs; \ + }) \ + } + + void store_float128(reg_t addr, float128_t val) + { +#ifndef RISCV_ENABLE_MISALIGNED + if (unlikely(addr & (sizeof(float128_t)-1))) + throw trap_store_address_misaligned((proc) ? proc->state.v : false, addr, 0, 0); +#endif + store_uint64(addr, val.v[0]); + store_uint64(addr + 8, val.v[1]); + } + + float128_t load_float128(reg_t addr) + { +#ifndef RISCV_ENABLE_MISALIGNED + if (unlikely(addr & (sizeof(float128_t)-1))) + throw trap_load_address_misaligned((proc) ? 
proc->state.v : false, addr, 0, 0); +#endif + return (float128_t){load_uint64(addr), load_uint64(addr + 8)}; + } + + // store value to memory at aligned address + store_func(uint8, store, 0) + store_func(uint16, store, 0) + store_func(uint32, store, 0) + store_func(uint64, store, 0) + + // store value to guest memory at aligned address + store_func(uint8, guest_store, RISCV_XLATE_VIRT) + store_func(uint16, guest_store, RISCV_XLATE_VIRT) + store_func(uint32, guest_store, RISCV_XLATE_VIRT) + store_func(uint64, guest_store, RISCV_XLATE_VIRT) + + // perform an atomic memory operation at an aligned address + amo_func(uint32) + amo_func(uint64) + + void cbo_zero(reg_t addr) { + auto base = addr & ~(blocksz - 1); + for (size_t offset = 0; offset < blocksz; offset += 1) + store_uint8(base + offset, 0); + } + + void clean_inval(reg_t addr, bool clean, bool inval) { + convert_load_traps_to_store_traps({ + reg_t paddr = addr & ~(blocksz - 1); + paddr = translate(paddr, blocksz, LOAD, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) { + if (tracer.interested_in_range(paddr, paddr + PGSIZE, LOAD)) + tracer.clean_invalidate(paddr, blocksz, clean, inval); + } else { + throw trap_store_access_fault((proc) ? proc->state.v : false, addr, 0, 0); + } + }) + } + + inline void yield_load_reservation() + { + load_reservation_address = (reg_t)-1; + } + + inline void acquire_load_reservation(reg_t vaddr) + { + reg_t paddr = translate(vaddr, 1, LOAD, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) + load_reservation_address = refill_tlb(vaddr, paddr, host_addr, LOAD).target_offset + vaddr; + else + throw trap_load_access_fault((proc) ? proc->state.v : false, vaddr, 0, 0); // disallow LR to I/O space + } + + inline void load_reserved_address_misaligned(reg_t vaddr) + { + bool gva = proc ? proc->state.v : false; +#ifdef RISCV_ENABLE_MISALIGNED + throw trap_load_access_fault(gva, vaddr, 0, 0); +#else + throw trap_load_address_misaligned(gva, vaddr, 0, 0); +#endif + } + + inline void store_conditional_address_misaligned(reg_t vaddr) + { + bool gva = proc ? proc->state.v : false; +#ifdef RISCV_ENABLE_MISALIGNED + throw trap_store_access_fault(gva, vaddr, 0, 0); +#else + throw trap_store_address_misaligned(gva, vaddr, 0, 0); +#endif + } + + inline bool check_load_reservation(reg_t vaddr, size_t size) + { + if (vaddr & (size-1)) + store_conditional_address_misaligned(vaddr); + + reg_t paddr = translate(vaddr, 1, STORE, 0); + if (auto host_addr = sim->addr_to_mem(paddr)) + return load_reservation_address == refill_tlb(vaddr, paddr, host_addr, STORE).target_offset + vaddr; + else + throw trap_store_access_fault((proc) ? 
proc->state.v : false, vaddr, 0, 0); // disallow SC to I/O space + } + + static const reg_t ICACHE_ENTRIES = 1024; + + inline size_t icache_index(reg_t addr) + { + return (addr / PC_ALIGN) % ICACHE_ENTRIES; + } + + inline icache_entry_t* refill_icache(reg_t addr, icache_entry_t* entry) + { + auto tlb_entry = translate_insn_addr(addr); + insn_bits_t insn = from_le(*(uint16_t*)(tlb_entry.host_offset + addr)); + int length = insn_length(insn); + + if (likely(length == 4)) { + insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + } else if (length == 2) { + insn = (int16_t)insn; + } else if (length == 6) { + insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 4)) << 32; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + } else { + static_assert(sizeof(insn_bits_t) == 8, "insn_bits_t must be uint64_t"); + insn |= (insn_bits_t)from_le(*(const int16_t*)translate_insn_addr_to_host(addr + 6)) << 48; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 4)) << 32; + insn |= (insn_bits_t)from_le(*(const uint16_t*)translate_insn_addr_to_host(addr + 2)) << 16; + } + + insn_fetch_t fetch = {proc->decode_insn(insn), insn}; + entry->tag = addr; + entry->next = &icache[icache_index(addr + length)]; + entry->data = fetch; + + reg_t paddr = tlb_entry.target_offset + addr;; + if (tracer.interested_in_range(paddr, paddr + 1, FETCH)) { + entry->tag = -1; + tracer.trace(paddr, length, FETCH); + } + return entry; + } + + inline icache_entry_t* access_icache(reg_t addr) + { + icache_entry_t* entry = &icache[icache_index(addr)]; + if (likely(entry->tag == addr)) + return entry; + return refill_icache(addr, entry); + } + + inline insn_fetch_t load_insn(reg_t addr) + { + icache_entry_t entry; + return refill_icache(addr, &entry)->data; + } + + void flush_tlb(); + void flush_icache(); + + void register_memtracer(memtracer_t*); + + int is_dirty_enabled() + { +#ifdef RISCV_ENABLE_DIRTY + return 1; +#else + return 0; +#endif + } + + int is_misaligned_enabled() + { +#ifdef RISCV_ENABLE_MISALIGNED + return 1; +#else + return 0; +#endif + } + + void set_target_big_endian(bool enable) + { +#ifdef RISCV_ENABLE_DUAL_ENDIAN + target_big_endian = enable; +#else + assert(enable == false); +#endif + } + + bool is_target_big_endian() + { + return target_big_endian; + } + + template inline T from_target(target_endian n) const + { + return target_big_endian? n.from_be() : n.from_le(); + } + + template inline target_endian to_target(T n) const + { + return target_big_endian? target_endian::to_be(n) : target_endian::to_le(n); + } + + void set_cache_blocksz(uint64_t size) + { + blocksz = size; + } + +private: + simif_t* sim; + processor_t* proc; + memtracer_list_t tracer; + reg_t load_reservation_address; + uint16_t fetch_temp; + uint64_t blocksz; + + // implement an instruction cache for simulator performance + icache_entry_t icache[ICACHE_ENTRIES]; + + // implement a TLB for simulator performance + static const reg_t TLB_ENTRIES = 256; + // If a TLB tag has TLB_CHECK_TRIGGERS set, then the MMU must check for a + // trigger match before completing an access. 
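+  // (refill_tlb() still caches such pages but ORs the trigger bit into the
+  //  tag, e.g. tag = (vaddr >> PGSHIFT) | TLB_CHECK_TRIGGERS, so the fast
+  //  paths find the host pointer yet take the trigger-checking branch.)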
+ static const reg_t TLB_CHECK_TRIGGERS = reg_t(1) << 63; + tlb_entry_t tlb_data[TLB_ENTRIES]; + reg_t tlb_insn_tag[TLB_ENTRIES]; + reg_t tlb_load_tag[TLB_ENTRIES]; + reg_t tlb_store_tag[TLB_ENTRIES]; + + // finish translation on a TLB miss and update the TLB + tlb_entry_t refill_tlb(reg_t vaddr, reg_t paddr, char* host_addr, access_type type); + const char* fill_from_mmio(reg_t vaddr, reg_t paddr); + + // perform a stage2 translation for a given guest address + reg_t s2xlate(reg_t gva, reg_t gpa, access_type type, access_type trap_type, bool virt, bool hlvx); + + // perform a page table walk for a given VA; set referenced/dirty bits + reg_t walk(reg_t addr, access_type type, reg_t prv, bool virt, bool hlvx); + + // handle uncommon cases: TLB misses, page faults, MMIO + tlb_entry_t fetch_slow_path(reg_t addr); + void load_slow_path(reg_t addr, reg_t len, uint8_t* bytes, uint32_t xlate_flags); + void store_slow_path(reg_t addr, reg_t len, const uint8_t* bytes, uint32_t xlate_flags, bool actually_store); + bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); + bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); + bool mmio_ok(reg_t addr, access_type type); + reg_t translate(reg_t addr, reg_t len, access_type type, uint32_t xlate_flags); + + // ITLB lookup + inline tlb_entry_t translate_insn_addr(reg_t addr) { + reg_t vpn = addr >> PGSHIFT; + if (likely(tlb_insn_tag[vpn % TLB_ENTRIES] == vpn)) + return tlb_data[vpn % TLB_ENTRIES]; + tlb_entry_t result; + if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] != (vpn | TLB_CHECK_TRIGGERS))) { + result = fetch_slow_path(addr); + } else { + result = tlb_data[vpn % TLB_ENTRIES]; + } + if (unlikely(tlb_insn_tag[vpn % TLB_ENTRIES] == (vpn | TLB_CHECK_TRIGGERS))) { + target_endian* ptr = (target_endian*)(tlb_data[vpn % TLB_ENTRIES].host_offset + addr); + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, triggers::OPERATION_EXECUTE, addr, from_target(*ptr)); + if (match != triggers::MATCH_NONE) { + throw triggers::matched_t(triggers::OPERATION_EXECUTE, addr, from_target(*ptr), action); + } + } + return result; + } + + inline const uint16_t* translate_insn_addr_to_host(reg_t addr) { + return (uint16_t*)(translate_insn_addr(addr).host_offset + addr); + } + + inline triggers::matched_t *trigger_exception(triggers::operation_t operation, + reg_t address, reg_t data) + { + if (!proc) { + return NULL; + } + triggers::action_t action; + auto match = proc->TM.memory_access_match(&action, operation, address, data); + if (match == triggers::MATCH_NONE) + return NULL; + if (match == triggers::MATCH_FIRE_BEFORE) { + throw triggers::matched_t(operation, address, data, action); + } + return new triggers::matched_t(operation, address, data, action); + } + + reg_t pmp_homogeneous(reg_t addr, reg_t len); + bool pmp_ok(reg_t addr, reg_t len, access_type type, reg_t mode); + +#ifdef RISCV_ENABLE_DUAL_ENDIAN + bool target_big_endian; +#else + static const bool target_big_endian = false; +#endif + bool check_triggers_fetch; + bool check_triggers_load; + bool check_triggers_store; + // The exception describing a matched trigger, or NULL. 
+ triggers::matched_t *matched_trigger; + + friend class processor_t; +}; + +struct vm_info { + int levels; + int idxbits; + int widenbits; + int ptesize; + reg_t ptbase; +}; + +inline vm_info decode_vm_info(int xlen, bool stage2, reg_t prv, reg_t satp) +{ + if (prv == PRV_M) { + return {0, 0, 0, 0, 0}; + } else if (!stage2 && prv <= PRV_S && xlen == 32) { + switch (get_field(satp, SATP32_MODE)) { + case SATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case SATP_MODE_SV32: return {2, 10, 0, 4, (satp & SATP32_PPN) << PGSHIFT}; + default: abort(); + } + } else if (!stage2 && prv <= PRV_S && xlen == 64) { + switch (get_field(satp, SATP64_MODE)) { + case SATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case SATP_MODE_SV39: return {3, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + case SATP_MODE_SV48: return {4, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + case SATP_MODE_SV57: return {5, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + case SATP_MODE_SV64: return {6, 9, 0, 8, (satp & SATP64_PPN) << PGSHIFT}; + default: abort(); + } + } else if (stage2 && xlen == 32) { + switch (get_field(satp, HGATP32_MODE)) { + case HGATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case HGATP_MODE_SV32X4: return {2, 10, 2, 4, (satp & HGATP32_PPN) << PGSHIFT}; + default: abort(); + } + } else if (stage2 && xlen == 64) { + switch (get_field(satp, HGATP64_MODE)) { + case HGATP_MODE_OFF: return {0, 0, 0, 0, 0}; + case HGATP_MODE_SV39X4: return {3, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT}; + case HGATP_MODE_SV48X4: return {4, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT}; + case HGATP_MODE_SV57X4: return {5, 9, 2, 8, (satp & HGATP64_PPN) << PGSHIFT}; + default: abort(); + } + } else { + abort(); + } +} + +#endif diff --git a/vendor/riscv-isa-sim/riscv/opcodes.h b/vendor/riscv-isa-sim/riscv/opcodes.h new file mode 100644 index 00000000..065934a2 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/opcodes.h @@ -0,0 +1,249 @@ +#include "encoding.h" + +#define ZERO 0 +#define T0 5 +#define S0 8 +#define S1 9 + +static uint32_t bits(uint32_t value, unsigned int hi, unsigned int lo) { + return (value >> lo) & ((1 << (hi+1-lo)) - 1); +} + +static uint32_t bit(uint32_t value, unsigned int b) { + return (value >> b) & 1; +} + +static uint32_t jal(unsigned int rd, uint32_t imm) __attribute__ ((unused)); +static uint32_t jal(unsigned int rd, uint32_t imm) { + return (bit(imm, 20) << 31) | + (bits(imm, 10, 1) << 21) | + (bit(imm, 11) << 20) | + (bits(imm, 19, 12) << 12) | + (rd << 7) | + MATCH_JAL; +} + +static uint32_t csrsi(unsigned int csr, uint16_t imm) __attribute__ ((unused)); +static uint32_t csrsi(unsigned int csr, uint16_t imm) { + return (csr << 20) | + (bits(imm, 4, 0) << 15) | + MATCH_CSRRSI; +} + +static uint32_t sw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sw(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_SW; +} + +static uint32_t sd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sd(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_SD; +} + +static uint32_t sh(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sh(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + 
MATCH_SH; +} + +static uint32_t sb(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t sb(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (src << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_SB; +} + +static uint32_t ld(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t ld(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LD; +} + +static uint32_t lw(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t lw(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LW; +} + +static uint32_t lh(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t lh(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LH; +} + +static uint32_t lb(unsigned int rd, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t lb(unsigned int rd, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(rd, 4, 0) << 7) | + MATCH_LB; +} + +static uint32_t csrw(unsigned int source, unsigned int csr) __attribute__ ((unused)); +static uint32_t csrw(unsigned int source, unsigned int csr) { + return (csr << 20) | (source << 15) | MATCH_CSRRW; +} + +static uint32_t addi(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused)); +static uint32_t addi(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 11, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_ADDI; +} + +static uint32_t csrr(unsigned int rd, unsigned int csr) __attribute__ ((unused)); +static uint32_t csrr(unsigned int rd, unsigned int csr) { + return (csr << 20) | (rd << 7) | MATCH_CSRRS; +} + +static uint32_t csrrs(unsigned int rd, unsigned int rs1, unsigned int csr) __attribute__ ((unused)); +static uint32_t csrrs(unsigned int rd, unsigned int rs1, unsigned int csr) { + return (csr << 20) | (rs1 << 15) | (rd << 7) | MATCH_CSRRS; +} + +static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fsw(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (bits(src, 4, 0) << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_FSW; +} + +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (bits(src, 4, 0) << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_FSD; +} + +static uint32_t flw(unsigned int dest, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t flw(unsigned int dest, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(dest, 4, 0) << 7) | + MATCH_FLW; +} + +static uint32_t fld(unsigned int dest, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fld(unsigned int dest, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 0) << 20) | + (base << 15) | + (bits(dest, 4, 0) << 7) | + MATCH_FLD; +} + +static uint32_t ebreak(void) __attribute__ 
((unused)); +static uint32_t ebreak(void) { return MATCH_EBREAK; } +static uint32_t ebreak_c(void) __attribute__ ((unused)); +static uint32_t ebreak_c(void) { return MATCH_C_EBREAK; } + +static uint32_t dret(void) __attribute__ ((unused)); +static uint32_t dret(void) { return MATCH_DRET; } + +static uint32_t fence_i(void) __attribute__ ((unused)); +static uint32_t fence_i(void) +{ + return MATCH_FENCE_I; +} + +static uint32_t lui(unsigned int dest, uint32_t imm) __attribute__ ((unused)); +static uint32_t lui(unsigned int dest, uint32_t imm) +{ + return (bits(imm, 19, 0) << 12) | + (dest << 7) | + MATCH_LUI; +} + +/* +static uint32_t csrci(unsigned int csr, uint16_t imm) __attribute__ ((unused)); +static uint32_t csrci(unsigned int csr, uint16_t imm) { + return (csr << 20) | + (bits(imm, 4, 0) << 15) | + MATCH_CSRRCI; +} + +static uint32_t li(unsigned int dest, uint16_t imm) __attribute__ ((unused)); +static uint32_t li(unsigned int dest, uint16_t imm) +{ + return addi(dest, 0, imm); +} + +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) __attribute__ ((unused)); +static uint32_t fsd(unsigned int src, unsigned int base, uint16_t offset) +{ + return (bits(offset, 11, 5) << 25) | + (bits(src, 4, 0) << 20) | + (base << 15) | + (bits(offset, 4, 0) << 7) | + MATCH_FSD; +} + +static uint32_t ori(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused)); +static uint32_t ori(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 11, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_ORI; +} + +static uint32_t nop(void) __attribute__ ((unused)); +static uint32_t nop(void) +{ + return addi(0, 0, 0); +} +*/ + +static uint32_t xori(unsigned int dest, unsigned int src, uint16_t imm) __attribute__ ((unused)); +static uint32_t xori(unsigned int dest, unsigned int src, uint16_t imm) +{ + return (bits(imm, 11, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_XORI; +} + +static uint32_t srli(unsigned int dest, unsigned int src, uint8_t shamt) __attribute__ ((unused)); +static uint32_t srli(unsigned int dest, unsigned int src, uint8_t shamt) +{ + return (bits(shamt, 4, 0) << 20) | + (src << 15) | + (dest << 7) | + MATCH_SRLI; +} diff --git a/vendor/riscv-isa-sim/riscv/overlap_list.h b/vendor/riscv-isa-sim/riscv/overlap_list.h new file mode 100644 index 00000000..2bc7f42d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/overlap_list.h @@ -0,0 +1,8 @@ +DECLARE_OVERLAP_INSN(c_fsdsp, 'C') +DECLARE_OVERLAP_INSN(c_fsdsp, 'D') +DECLARE_OVERLAP_INSN(c_fld, 'C') +DECLARE_OVERLAP_INSN(c_fld, 'D') +DECLARE_OVERLAP_INSN(c_fldsp, 'C') +DECLARE_OVERLAP_INSN(c_fldsp, 'D') +DECLARE_OVERLAP_INSN(c_fsd, 'C') +DECLARE_OVERLAP_INSN(c_fsd, 'D') diff --git a/vendor/riscv-isa-sim/riscv/platform.h b/vendor/riscv-isa-sim/riscv/platform.h new file mode 100644 index 00000000..6618d44e --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/platform.h @@ -0,0 +1,11 @@ +// See LICENSE for license details. +#ifndef _RISCV_PLATFORM_H +#define _RISCV_PLATFORM_H + +#define DEFAULT_RSTVEC 0x00001000 +#define CLINT_BASE 0x02000000 +#define CLINT_SIZE 0x000c0000 +#define EXT_IO_BASE 0x40000000 +#define DRAM_BASE 0x80000000 + +#endif diff --git a/vendor/riscv-isa-sim/riscv/processor.cc b/vendor/riscv-isa-sim/riscv/processor.cc new file mode 100644 index 00000000..9ce9287c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/processor.cc @@ -0,0 +1,1028 @@ +// See LICENSE for license details. 
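+// (Descriptive note, not in the upstream source:) processor.cc implements
+// processor_t: per-hart architectural state (CSRs, integer/FP/vector register
+// files, privilege and virtualization mode), instruction decode and dispatch,
+// and interrupt/trap delivery for M-, S-/HS-, VS- and U-mode.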
+ +#include "arith.h" +#include "processor.h" +#include "extension.h" +#include "common.h" +#include "config.h" +#include "simif.h" +#include "mmu.h" +#include "disasm.h" +#include "platform.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef STATE +#define STATE state + +processor_t::processor_t(const isa_parser_t *isa, const char* varch, + simif_t* sim, uint32_t id, bool halt_on_reset, + FILE* log_file, std::ostream& sout_) + : debug(false), halt_request(HR_NONE), isa(isa), sim(sim), id(id), xlen(0), + histogram_enabled(false), log_commits_enabled(false), + log_file(log_file), sout_(sout_.rdbuf()), halt_on_reset(halt_on_reset), + impl_table(256, false), last_pc(1), executions(1), TM(4) +{ + VU.p = this; + TM.proc = this; + +#ifndef __SIZEOF_INT128__ + if (extension_enabled('V')) { + fprintf(stderr, "V extension is not supported on platforms without __int128 type\n"); + abort(); + } +#endif + + parse_varch_string(varch); + + register_base_instructions(); + mmu = new mmu_t(sim, this); + + disassembler = new disassembler_t(isa); + for (auto e : isa->get_extensions()) + register_extension(e.second); + + set_pmp_granularity(1 << PMP_SHIFT); + set_pmp_num(state.max_pmp); + + if (isa->get_max_xlen() == 32) + set_mmu_capability(IMPL_MMU_SV32); + else if (isa->get_max_xlen() == 64) + set_mmu_capability(IMPL_MMU_SV48); + + set_impl(IMPL_MMU_ASID, true); + set_impl(IMPL_MMU_VMID, true); + + reset(); +} + +processor_t::~processor_t() +{ +#ifdef RISCV_ENABLE_HISTOGRAM + if (histogram_enabled) + { + fprintf(stderr, "PC Histogram size:%zu\n", pc_histogram.size()); + for (auto it : pc_histogram) + fprintf(stderr, "%0" PRIx64 " %" PRIu64 "\n", it.first, it.second); + } +#endif + + delete mmu; + delete disassembler; +} + +static void bad_option_string(const char *option, const char *value, + const char *msg) +{ + fprintf(stderr, "error: bad %s option '%s'. %s\n", option, value, msg); + abort(); +} + +static void bad_varch_string(const char* varch, const char *msg) +{ + bad_option_string("--varch", varch, msg); +} + +static std::string get_string_token(std::string str, const char delimiter, size_t& pos) +{ + size_t _pos = pos; + while (pos < str.length() && str[pos] != delimiter) ++pos; + return str.substr(_pos, pos - _pos); +} + +static int get_int_token(std::string str, const char delimiter, size_t& pos) +{ + size_t _pos = pos; + while (pos < str.length() && str[pos] != delimiter) { + if (!isdigit(str[pos])) + bad_varch_string(str.c_str(), "Unsupported value"); // An integer is expected + ++pos; + } + return (pos == _pos) ? 
0 : stoi(str.substr(_pos, pos - _pos)); +} + +static bool check_pow2(int val) +{ + return ((val & (val - 1))) == 0; +} + +static std::string strtolower(const char* str) +{ + std::string res; + for (const char *r = str; *r; r++) + res += std::tolower(*r); + return res; +} + +void processor_t::parse_varch_string(const char* s) +{ + std::string str = strtolower(s); + size_t pos = 0; + size_t len = str.length(); + int vlen = 0; + int elen = 0; + int vstart_alu = 0; + + while (pos < len) { + std::string attr = get_string_token(str, ':', pos); + + ++pos; + + if (attr == "vlen") + vlen = get_int_token(str, ',', pos); + else if (attr == "elen") + elen = get_int_token(str, ',', pos); + else if (attr == "vstartalu") + vstart_alu = get_int_token(str, ',', pos); + else + bad_varch_string(s, "Unsupported token"); + + ++pos; + } + + // The integer should be the power of 2 + if (!check_pow2(vlen) || !check_pow2(elen)) { + bad_varch_string(s, "The integer value should be the power of 2"); + } + + /* Vector spec requirements. */ + if (vlen < elen) + bad_varch_string(s, "vlen must be >= elen"); + + /* spike requirements. */ + if (vlen > 4096) + bad_varch_string(s, "vlen must be <= 4096"); + + VU.VLEN = vlen; + VU.ELEN = elen; + VU.vlenb = vlen / 8; + VU.vstart_alu = vstart_alu; +} + +static int xlen_to_uxl(int xlen) +{ + if (xlen == 32) + return 1; + if (xlen == 64) + return 2; + abort(); +} + +void state_t::reset(processor_t* const proc, reg_t max_isa) +{ + pc = DEFAULT_RSTVEC; + XPR.reset(); + FPR.reset(); + + // This assumes xlen is always max_xlen, which is true today (see + // mstatus_csr_t::unlogged_write()): + auto xlen = proc->get_isa().get_max_xlen(); + + prv = PRV_M; + v = false; + csrmap[CSR_MISA] = misa = std::make_shared(proc, CSR_MISA, max_isa); + csrmap[CSR_MSTATUS] = mstatus = std::make_shared(proc, CSR_MSTATUS); + if (xlen == 32) csrmap[CSR_MSTATUSH] = std::make_shared(proc, CSR_MSTATUSH, mstatus); + csrmap[CSR_MEPC] = mepc = std::make_shared(proc, CSR_MEPC); + csrmap[CSR_MTVAL] = mtval = std::make_shared(proc, CSR_MTVAL, 0); + csrmap[CSR_MSCRATCH] = std::make_shared(proc, CSR_MSCRATCH, 0); + csrmap[CSR_MTVEC] = mtvec = std::make_shared(proc, CSR_MTVEC); + csrmap[CSR_MCAUSE] = mcause = std::make_shared(proc, CSR_MCAUSE); + csrmap[CSR_MINSTRET] = minstret = std::make_shared(proc, CSR_MINSTRET); + csrmap[CSR_MCYCLE] = mcycle = std::make_shared(proc, CSR_MCYCLE); + if (proc->extension_enabled_const(EXT_ZICNTR)) { + csrmap[CSR_INSTRET] = std::make_shared(proc, CSR_INSTRET, minstret); + csrmap[CSR_CYCLE] = std::make_shared(proc, CSR_CYCLE, mcycle); + } + if (xlen == 32) { + counter_top_csr_t_p minstreth, mcycleh; + csrmap[CSR_MINSTRETH] = minstreth = std::make_shared(proc, CSR_MINSTRETH, minstret); + csrmap[CSR_MCYCLEH] = mcycleh = std::make_shared(proc, CSR_MCYCLEH, mcycle); + if (proc->extension_enabled_const(EXT_ZICNTR)) { + csrmap[CSR_INSTRETH] = std::make_shared(proc, CSR_INSTRETH, minstreth); + csrmap[CSR_CYCLEH] = std::make_shared(proc, CSR_CYCLEH, mcycleh); + } + } + for (reg_t i = 3; i <= 31; ++i) { + const reg_t which_mevent = CSR_MHPMEVENT3 + i - 3; + const reg_t which_mcounter = CSR_MHPMCOUNTER3 + i - 3; + const reg_t which_mcounterh = CSR_MHPMCOUNTER3H + i - 3; + const reg_t which_counter = CSR_HPMCOUNTER3 + i - 3; + const reg_t which_counterh = CSR_HPMCOUNTER3H + i - 3; + auto mevent = std::make_shared(proc, which_mevent, 0); + auto mcounter = std::make_shared(proc, which_mcounter, 0); + csrmap[which_mevent] = mevent; + csrmap[which_mcounter] = mcounter; + + if 
(proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) { + auto counter = std::make_shared(proc, which_counter, mcounter); + csrmap[which_counter] = counter; + } + if (xlen == 32) { + auto mcounterh = std::make_shared(proc, which_mcounterh, 0); + csrmap[which_mcounterh] = mcounterh; + if (proc->extension_enabled_const(EXT_ZICNTR) && proc->extension_enabled_const(EXT_ZIHPM)) { + auto counterh = std::make_shared(proc, which_counterh, mcounterh); + csrmap[which_counterh] = counterh; + } + } + } + csrmap[CSR_MCOUNTINHIBIT] = std::make_shared(proc, CSR_MCOUNTINHIBIT, 0); + csrmap[CSR_MIE] = mie = std::make_shared(proc, CSR_MIE); + csrmap[CSR_MIP] = mip = std::make_shared(proc, CSR_MIP); + auto sip_sie_accr = std::make_shared( + this, + ~MIP_HS_MASK, // read_mask + MIP_SSIP, // ip_write_mask + ~MIP_HS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::MIDELEG, + 0 // shiftamt + ); + + auto hip_hie_accr = std::make_shared( + this, + MIP_HS_MASK, // read_mask + MIP_VSSIP, // ip_write_mask + MIP_HS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::MIDELEG, + 0 // shiftamt + ); + + auto hvip_accr = std::make_shared( + this, + MIP_VS_MASK, // read_mask + MIP_VS_MASK, // ip_write_mask + MIP_VS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::NONE, + 0 // shiftamt + ); + + auto vsip_vsie_accr = std::make_shared( + this, + MIP_VS_MASK, // read_mask + MIP_VSSIP, // ip_write_mask + MIP_VS_MASK, // ie_write_mask + generic_int_accessor_t::mask_mode_t::HIDELEG, + 1 // shiftamt + ); + + auto nonvirtual_sip = std::make_shared(proc, CSR_SIP, sip_sie_accr); + auto vsip = std::make_shared(proc, CSR_VSIP, vsip_vsie_accr); + csrmap[CSR_VSIP] = vsip; + csrmap[CSR_SIP] = std::make_shared(proc, nonvirtual_sip, vsip); + csrmap[CSR_HIP] = std::make_shared(proc, CSR_HIP, hip_hie_accr); + csrmap[CSR_HVIP] = std::make_shared(proc, CSR_HVIP, hvip_accr); + + auto nonvirtual_sie = std::make_shared(proc, CSR_SIE, sip_sie_accr); + auto vsie = std::make_shared(proc, CSR_VSIE, vsip_vsie_accr); + csrmap[CSR_VSIE] = vsie; + csrmap[CSR_SIE] = std::make_shared(proc, nonvirtual_sie, vsie); + csrmap[CSR_HIE] = std::make_shared(proc, CSR_HIE, hip_hie_accr); + + csrmap[CSR_MEDELEG] = medeleg = std::make_shared(proc, CSR_MEDELEG); + csrmap[CSR_MIDELEG] = mideleg = std::make_shared(proc, CSR_MIDELEG); + const reg_t counteren_mask = 0xffffffffULL; + mcounteren = std::make_shared(proc, CSR_MCOUNTEREN, counteren_mask, 0); + if (proc->extension_enabled_const('U')) csrmap[CSR_MCOUNTEREN] = mcounteren; + csrmap[CSR_SCOUNTEREN] = scounteren = std::make_shared(proc, CSR_SCOUNTEREN, counteren_mask, 0); + auto nonvirtual_sepc = std::make_shared(proc, CSR_SEPC); + csrmap[CSR_VSEPC] = vsepc = std::make_shared(proc, CSR_VSEPC); + csrmap[CSR_SEPC] = sepc = std::make_shared(proc, nonvirtual_sepc, vsepc); + auto nonvirtual_stval = std::make_shared(proc, CSR_STVAL, 0); + csrmap[CSR_VSTVAL] = vstval = std::make_shared(proc, CSR_VSTVAL, 0); + csrmap[CSR_STVAL] = stval = std::make_shared(proc, nonvirtual_stval, vstval); + auto sscratch = std::make_shared(proc, CSR_SSCRATCH, 0); + auto vsscratch = std::make_shared(proc, CSR_VSSCRATCH, 0); + // Note: if max_isa does not include H, we don't really need this virtualized_csr_t at all (though it doesn't hurt): + csrmap[CSR_SSCRATCH] = std::make_shared(proc, sscratch, vsscratch); + csrmap[CSR_VSSCRATCH] = vsscratch; + auto nonvirtual_stvec = std::make_shared(proc, CSR_STVEC); + csrmap[CSR_VSTVEC] = vstvec = std::make_shared(proc, CSR_VSTVEC); + 
csrmap[CSR_STVEC] = stvec = std::make_shared(proc, nonvirtual_stvec, vstvec); + auto nonvirtual_satp = std::make_shared(proc, CSR_SATP); + csrmap[CSR_VSATP] = vsatp = std::make_shared(proc, CSR_VSATP); + csrmap[CSR_SATP] = satp = std::make_shared(proc, nonvirtual_satp, vsatp); + auto nonvirtual_scause = std::make_shared(proc, CSR_SCAUSE); + csrmap[CSR_VSCAUSE] = vscause = std::make_shared(proc, CSR_VSCAUSE); + csrmap[CSR_SCAUSE] = scause = std::make_shared(proc, nonvirtual_scause, vscause); + csrmap[CSR_MTVAL2] = mtval2 = std::make_shared(proc, CSR_MTVAL2); + csrmap[CSR_MTINST] = mtinst = std::make_shared(proc, CSR_MTINST); + const reg_t hstatus_init = set_field((reg_t)0, HSTATUS_VSXL, xlen_to_uxl(proc->get_const_xlen())); + const reg_t hstatus_mask = HSTATUS_VTSR | HSTATUS_VTW + | (proc->supports_impl(IMPL_MMU) ? HSTATUS_VTVM : 0) + | HSTATUS_HU | HSTATUS_SPVP | HSTATUS_SPV | HSTATUS_GVA; + csrmap[CSR_HSTATUS] = hstatus = std::make_shared(proc, CSR_HSTATUS, hstatus_mask, hstatus_init); + csrmap[CSR_HGEIE] = std::make_shared(proc, CSR_HGEIE, 0); + csrmap[CSR_HGEIP] = std::make_shared(proc, CSR_HGEIP, 0); + csrmap[CSR_HIDELEG] = hideleg = std::make_shared(proc, CSR_HIDELEG, mideleg); + const reg_t hedeleg_mask = + (1 << CAUSE_MISALIGNED_FETCH) | + (1 << CAUSE_FETCH_ACCESS) | + (1 << CAUSE_ILLEGAL_INSTRUCTION) | + (1 << CAUSE_BREAKPOINT) | + (1 << CAUSE_MISALIGNED_LOAD) | + (1 << CAUSE_LOAD_ACCESS) | + (1 << CAUSE_MISALIGNED_STORE) | + (1 << CAUSE_STORE_ACCESS) | + (1 << CAUSE_USER_ECALL) | + (1 << CAUSE_FETCH_PAGE_FAULT) | + (1 << CAUSE_LOAD_PAGE_FAULT) | + (1 << CAUSE_STORE_PAGE_FAULT); + csrmap[CSR_HEDELEG] = hedeleg = std::make_shared(proc, CSR_HEDELEG, hedeleg_mask, 0); + csrmap[CSR_HCOUNTEREN] = hcounteren = std::make_shared(proc, CSR_HCOUNTEREN, counteren_mask, 0); + csrmap[CSR_HTVAL] = htval = std::make_shared(proc, CSR_HTVAL, 0); + csrmap[CSR_HTINST] = htinst = std::make_shared(proc, CSR_HTINST, 0); + csrmap[CSR_HGATP] = hgatp = std::make_shared(proc, CSR_HGATP); + auto nonvirtual_sstatus = std::make_shared(proc, CSR_SSTATUS, mstatus); + csrmap[CSR_VSSTATUS] = vsstatus = std::make_shared(proc, CSR_VSSTATUS); + csrmap[CSR_SSTATUS] = sstatus = std::make_shared(proc, nonvirtual_sstatus, vsstatus); + + csrmap[CSR_DPC] = dpc = std::make_shared(proc, CSR_DPC); + csrmap[CSR_DSCRATCH0] = std::make_shared(proc, CSR_DSCRATCH0); + csrmap[CSR_DSCRATCH1] = std::make_shared(proc, CSR_DSCRATCH1); + csrmap[CSR_DCSR] = dcsr = std::make_shared(proc, CSR_DCSR); + + csrmap[CSR_TSELECT] = tselect = std::make_shared(proc, CSR_TSELECT); + + csrmap[CSR_TDATA1] = std::make_shared(proc, CSR_TDATA1); + csrmap[CSR_TDATA2] = tdata2 = std::make_shared(proc, CSR_TDATA2); + csrmap[CSR_TDATA3] = std::make_shared(proc, CSR_TDATA3, 0); + debug_mode = false; + single_step = STEP_NONE; + + csrmap[CSR_MSECCFG] = mseccfg = std::make_shared(proc, CSR_MSECCFG); + + for (int i = 0; i < max_pmp; ++i) { + csrmap[CSR_PMPADDR0 + i] = pmpaddr[i] = std::make_shared(proc, CSR_PMPADDR0 + i); + } + for (int i = 0; i < max_pmp; i += xlen / 8) { + reg_t addr = CSR_PMPCFG0 + i / 4; + csrmap[addr] = std::make_shared(proc, addr); + } + + csrmap[CSR_FFLAGS] = fflags = std::make_shared(proc, CSR_FFLAGS, FSR_AEXC >> FSR_AEXC_SHIFT, 0); + csrmap[CSR_FRM] = frm = std::make_shared(proc, CSR_FRM, FSR_RD >> FSR_RD_SHIFT, 0); + assert(FSR_AEXC_SHIFT == 0); // composite_csr_t assumes fflags begins at bit 0 + csrmap[CSR_FCSR] = std::make_shared(proc, CSR_FCSR, frm, fflags, FSR_RD_SHIFT); + + csrmap[CSR_SEED] = std::make_shared(proc, CSR_SEED); + + 
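+  // Machine identification CSRs: marchid is set to 5, the architecture ID
+  // registered for Spike; mimpid and mvendorid read as zero, and mhartid
+  // returns this hart's id.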
csrmap[CSR_MARCHID] = std::make_shared(proc, CSR_MARCHID, 5); + csrmap[CSR_MIMPID] = std::make_shared(proc, CSR_MIMPID, 0); + csrmap[CSR_MVENDORID] = std::make_shared(proc, CSR_MVENDORID, 0); + csrmap[CSR_MHARTID] = std::make_shared(proc, CSR_MHARTID, proc->get_id()); + const reg_t menvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? MENVCFG_CBCFE | MENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? MENVCFG_CBZE : 0); + csrmap[CSR_MENVCFG] = menvcfg = std::make_shared(proc, CSR_MENVCFG, menvcfg_mask, 0); + const reg_t senvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? SENVCFG_CBCFE | SENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? SENVCFG_CBZE : 0); + csrmap[CSR_SENVCFG] = senvcfg = std::make_shared(proc, CSR_SENVCFG, senvcfg_mask, 0); + const reg_t henvcfg_mask = (proc->extension_enabled(EXT_ZICBOM) ? HENVCFG_CBCFE | HENVCFG_CBIE : 0) | + (proc->extension_enabled(EXT_ZICBOZ) ? HENVCFG_CBZE : 0); + csrmap[CSR_HENVCFG] = henvcfg = std::make_shared(proc, CSR_HENVCFG, henvcfg_mask, 0); + + serialized = false; + +#ifdef RISCV_ENABLE_COMMITLOG + log_reg_write.clear(); + log_mem_read.clear(); + log_mem_write.clear(); + last_inst_priv = 0; + last_inst_xlen = 0; + last_inst_flen = 0; +#endif +} + +void processor_t::vectorUnit_t::reset() +{ + free(reg_file); + VLEN = get_vlen(); + ELEN = get_elen(); + reg_file = malloc(NVPR * vlenb); + memset(reg_file, 0, NVPR * vlenb); + + auto& csrmap = p->get_state()->csrmap; + csrmap[CSR_VXSAT] = vxsat = std::make_shared(p, CSR_VXSAT); + csrmap[CSR_VSTART] = vstart = std::make_shared(p, CSR_VSTART, /*mask*/ VLEN - 1); + csrmap[CSR_VXRM] = vxrm = std::make_shared(p, CSR_VXRM, /*mask*/ 0x3ul); + csrmap[CSR_VL] = vl = std::make_shared(p, CSR_VL, /*mask*/ 0); + csrmap[CSR_VTYPE] = vtype = std::make_shared(p, CSR_VTYPE, /*mask*/ 0); + csrmap[CSR_VLENB] = std::make_shared(p, CSR_VLENB, /*mask*/ 0, /*init*/ vlenb); + assert(VCSR_VXSAT_SHIFT == 0); // composite_csr_t assumes vxsat begins at bit 0 + csrmap[CSR_VCSR] = std::make_shared(p, CSR_VCSR, vxrm, vxsat, VCSR_VXRM_SHIFT); + + vtype->write_raw(0); + set_vl(0, 0, 0, -1); // default to illegal configuration +} + +reg_t processor_t::vectorUnit_t::set_vl(int rd, int rs1, reg_t reqVL, reg_t newType) +{ + int new_vlmul = 0; + if (vtype->read() != newType) { + vtype->write_raw(newType); + vsew = 1 << (extract64(newType, 3, 3) + 3); + new_vlmul = int8_t(extract64(newType, 0, 3) << 5) >> 5; + vflmul = new_vlmul >= 0 ? 1 << new_vlmul : 1.0 / (1 << -new_vlmul); + vlmax = (VLEN/vsew) * vflmul; + vta = extract64(newType, 6, 1); + vma = extract64(newType, 7, 1); + + vill = !(vflmul >= 0.125 && vflmul <= 8) + || vsew > std::min(vflmul, 1.0f) * ELEN + || (newType >> 8) != 0; + + if (vill) { + vlmax = 0; + vtype->write_raw(UINT64_MAX << (p->get_xlen() - 1)); + } + } + + // set vl + if (vlmax == 0) { + vl->write_raw(0); + } else if (rd == 0 && rs1 == 0) { + vl->write_raw(vl->read() > vlmax ? vlmax : vl->read()); + } else if (rd != 0 && rs1 == 0) { + vl->write_raw(vlmax); + } else if (rs1 != 0) { + vl->write_raw(reqVL > vlmax ? 
vlmax : reqVL); + } + + vstart->write_raw(0); + setvl_count++; + return vl->read(); +} + +void processor_t::set_debug(bool value) +{ + debug = value; + + for (auto e : custom_extensions) + e.second->set_debug(value); +} + +void processor_t::set_histogram(bool value) +{ + histogram_enabled = value; +#ifndef RISCV_ENABLE_HISTOGRAM + if (value) { + fprintf(stderr, "PC Histogram support has not been properly enabled;"); + fprintf(stderr, " please re-build the riscv-isa-sim project using \"configure --enable-histogram\".\n"); + abort(); + } +#endif +} + +#ifdef RISCV_ENABLE_COMMITLOG +void processor_t::enable_log_commits() +{ + log_commits_enabled = true; +} +#endif + +void processor_t::reset() +{ + xlen = isa->get_max_xlen(); + state.reset(this, isa->get_max_isa()); + state.dcsr->halt = halt_on_reset; + halt_on_reset = false; + VU.reset(); + + if (n_pmp > 0) { + // For backwards compatibility with software that is unaware of PMP, + // initialize PMP to permit unprivileged access to all of memory. + put_csr(CSR_PMPADDR0, ~reg_t(0)); + put_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + } + + for (auto e : custom_extensions) // reset any extensions + e.second->reset(); + + if (sim) + sim->proc_reset(id); +} + +extension_t* processor_t::get_extension() +{ + switch (custom_extensions.size()) { + case 0: return NULL; + case 1: return custom_extensions.begin()->second; + default: + fprintf(stderr, "processor_t::get_extension() is ambiguous when multiple extensions\n"); + fprintf(stderr, "are present!\n"); + abort(); + } +} + +extension_t* processor_t::get_extension(const char* name) +{ + auto it = custom_extensions.find(name); + if (it == custom_extensions.end()) + abort(); + return it->second; +} + +void processor_t::set_pmp_num(reg_t n) +{ + // check the number of pmp is in a reasonable range + if (n > state.max_pmp) { + fprintf(stderr, "error: bad number of pmp regions: '%ld' from the dtb\n", (unsigned long)n); + abort(); + } + n_pmp = n; +} + +void processor_t::set_pmp_granularity(reg_t gran) +{ + // check the pmp granularity is set from dtb(!=0) and is power of 2 + if (gran < (1 << PMP_SHIFT) || (gran & (gran - 1)) != 0) { + fprintf(stderr, "error: bad pmp granularity '%ld' from the dtb\n", (unsigned long)gran); + abort(); + } + + lg_pmp_granularity = ctz(gran); +} + +void processor_t::set_mmu_capability(int cap) +{ + switch (cap) { + case IMPL_MMU_SV32: + set_impl(IMPL_MMU_SV32, true); + set_impl(IMPL_MMU, true); + break; + case IMPL_MMU_SV57: + set_impl(IMPL_MMU_SV57, true); + // Fall through + case IMPL_MMU_SV48: + set_impl(IMPL_MMU_SV48, true); + // Fall through + case IMPL_MMU_SV39: + set_impl(IMPL_MMU_SV39, true); + set_impl(IMPL_MMU, true); + break; + default: + set_impl(IMPL_MMU_SV32, false); + set_impl(IMPL_MMU_SV39, false); + set_impl(IMPL_MMU_SV48, false); + set_impl(IMPL_MMU_SV57, false); + set_impl(IMPL_MMU, false); + break; + } +} + +void processor_t::take_interrupt(reg_t pending_interrupts) +{ + // Do nothing if no pending interrupts + if (!pending_interrupts) { + return; + } + + // M-ints have higher priority over HS-ints and VS-ints + const reg_t mie = get_field(state.mstatus->read(), MSTATUS_MIE); + const reg_t m_enabled = state.prv < PRV_M || (state.prv == PRV_M && mie); + reg_t enabled_interrupts = pending_interrupts & ~state.mideleg->read() & -m_enabled; + if (enabled_interrupts == 0) { + // HS-ints have higher priority over VS-ints + const reg_t deleg_to_hs = state.mideleg->read() & ~state.hideleg->read(); + const reg_t sie = get_field(state.sstatus->read(), 
MSTATUS_SIE); + const reg_t hs_enabled = state.v || state.prv < PRV_S || (state.prv == PRV_S && sie); + enabled_interrupts = pending_interrupts & deleg_to_hs & -hs_enabled; + if (state.v && enabled_interrupts == 0) { + // VS-ints have least priority and can only be taken with virt enabled + const reg_t deleg_to_vs = state.hideleg->read(); + const reg_t vs_enabled = state.prv < PRV_S || (state.prv == PRV_S && sie); + enabled_interrupts = pending_interrupts & deleg_to_vs & -vs_enabled; + } + } + + if (!state.debug_mode && enabled_interrupts) { + // nonstandard interrupts have highest priority + if (enabled_interrupts >> (IRQ_M_EXT + 1)) + enabled_interrupts = enabled_interrupts >> (IRQ_M_EXT + 1) << (IRQ_M_EXT + 1); + // standard interrupt priority is MEI, MSI, MTI, SEI, SSI, STI + else if (enabled_interrupts & MIP_MEIP) + enabled_interrupts = MIP_MEIP; + else if (enabled_interrupts & MIP_MSIP) + enabled_interrupts = MIP_MSIP; + else if (enabled_interrupts & MIP_MTIP) + enabled_interrupts = MIP_MTIP; + else if (enabled_interrupts & MIP_SEIP) + enabled_interrupts = MIP_SEIP; + else if (enabled_interrupts & MIP_SSIP) + enabled_interrupts = MIP_SSIP; + else if (enabled_interrupts & MIP_STIP) + enabled_interrupts = MIP_STIP; + else if (enabled_interrupts & MIP_VSEIP) + enabled_interrupts = MIP_VSEIP; + else if (enabled_interrupts & MIP_VSSIP) + enabled_interrupts = MIP_VSSIP; + else if (enabled_interrupts & MIP_VSTIP) + enabled_interrupts = MIP_VSTIP; + else + abort(); + + throw trap_t(((reg_t)1 << (isa->get_max_xlen() - 1)) | ctz(enabled_interrupts)); + } +} + +reg_t processor_t::legalize_privilege(reg_t prv) +{ + assert(prv <= PRV_M); + + if (!extension_enabled('U')) + return PRV_M; + + if (prv == PRV_HS || (prv == PRV_S && !extension_enabled('S'))) + return PRV_U; + + return prv; +} + +void processor_t::set_privilege(reg_t prv) +{ + mmu->flush_tlb(); + state.prv = legalize_privilege(prv); +} + +void processor_t::set_virt(bool virt) +{ + reg_t tmp, mask; + + if (state.prv == PRV_M) + return; + + if (state.v != virt) { + /* + * Ideally, we should flush TLB here but we don't need it because + * set_virt() is always used in conjucter with set_privilege() and + * set_privilege() will flush TLB unconditionally. + * + * The virtualized sstatus register also relies on this TLB flush, + * since changing V might change sstatus.MXR and sstatus.SUM. 
+ */ + state.v = virt; + } +} + +void processor_t::enter_debug_mode(uint8_t cause) +{ + state.debug_mode = true; + state.dcsr->write_cause_and_prv(cause, state.prv); + set_privilege(PRV_M); + state.dpc->write(state.pc); + state.pc = DEBUG_ROM_ENTRY; +} + +void processor_t::debug_output_log(std::stringstream *s) +{ + if (log_file == stderr) { + std::ostream out(sout_.rdbuf()); + out << s->str(); // handles command line options -d -s -l + } else { + fputs(s->str().c_str(), log_file); // handles command line option --log + } +} + +void processor_t::take_trap(trap_t& t, reg_t epc) +{ + unsigned max_xlen = isa->get_max_xlen(); + + if (debug) { + std::stringstream s; // first put everything in a string, later send it to output + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": exception " << t.name() << ", epc 0x" + << std::hex << std::setfill('0') << std::setw(max_xlen/4) << zext(epc, max_xlen) << std::endl; + if (t.has_tval()) + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": tval 0x" << std::hex << std::setfill('0') << std::setw(max_xlen / 4) + << zext(t.get_tval(), max_xlen) << std::endl; + debug_output_log(&s); + } + + if (state.debug_mode) { + if (t.cause() == CAUSE_BREAKPOINT) { + state.pc = DEBUG_ROM_ENTRY; + } else { + state.pc = DEBUG_ROM_TVEC; + } + return; + } + + if (t.cause() == CAUSE_BREAKPOINT && ( + (state.prv == PRV_M && state.dcsr->ebreakm) || + (state.prv == PRV_S && state.dcsr->ebreaks) || + (state.prv == PRV_U && state.dcsr->ebreaku))) { + enter_debug_mode(DCSR_CAUSE_SWBP); + return; + } + + // By default, trap to M-mode, unless delegated to HS-mode or VS-mode + reg_t vsdeleg, hsdeleg; + reg_t bit = t.cause(); + bool curr_virt = state.v; + bool interrupt = (bit & ((reg_t)1 << (max_xlen - 1))) != 0; + if (interrupt) { + vsdeleg = (curr_virt && state.prv <= PRV_S) ? state.hideleg->read() : 0; + hsdeleg = (state.prv <= PRV_S) ? state.mideleg->read() : 0; + bit &= ~((reg_t)1 << (max_xlen - 1)); + } else { + vsdeleg = (curr_virt && state.prv <= PRV_S) ? (state.medeleg->read() & state.hedeleg->read()) : 0; + hsdeleg = (state.prv <= PRV_S) ? state.medeleg->read() : 0; + } + if (state.prv <= PRV_S && bit < max_xlen && ((vsdeleg >> bit) & 1)) { + // Handle the trap in VS-mode + reg_t vector = (state.vstvec->read() & 1) && interrupt ? 4 * bit : 0; + state.pc = (state.vstvec->read() & ~(reg_t)1) + vector; + state.vscause->write((interrupt) ? (t.cause() - 1) : t.cause()); + state.vsepc->write(epc); + state.vstval->write(t.get_tval()); + + reg_t s = state.sstatus->read(); + s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); + s = set_field(s, MSTATUS_SPP, state.prv); + s = set_field(s, MSTATUS_SIE, 0); + state.sstatus->write(s); + set_privilege(PRV_S); + } else if (state.prv <= PRV_S && bit < max_xlen && ((hsdeleg >> bit) & 1)) { + // Handle the trap in HS-mode + set_virt(false); + reg_t vector = (state.stvec->read() & 1) && interrupt ? 
4 * bit : 0; + state.pc = (state.stvec->read() & ~(reg_t)1) + vector; + state.scause->write(t.cause()); + state.sepc->write(epc); + state.stval->write(t.get_tval()); + state.htval->write(t.get_tval2()); + state.htinst->write(t.get_tinst()); + + reg_t s = state.sstatus->read(); + s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE)); + s = set_field(s, MSTATUS_SPP, state.prv); + s = set_field(s, MSTATUS_SIE, 0); + state.sstatus->write(s); + if (extension_enabled('H')) { + s = state.hstatus->read(); + if (curr_virt) + s = set_field(s, HSTATUS_SPVP, state.prv); + s = set_field(s, HSTATUS_SPV, curr_virt); + s = set_field(s, HSTATUS_GVA, t.has_gva()); + state.hstatus->write(s); + } + set_privilege(PRV_S); + } else { + // Handle the trap in M-mode + set_virt(false); + reg_t vector = (state.mtvec->read() & 1) && interrupt ? 4 * bit : 0; + state.pc = (state.mtvec->read() & ~(reg_t)1) + vector; + state.mepc->write(epc); + state.mcause->write(t.cause()); + state.mtval->write(t.get_tval()); + state.mtval2->write(t.get_tval2()); + state.mtinst->write(t.get_tinst()); + + reg_t s = state.mstatus->read(); + s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE)); + s = set_field(s, MSTATUS_MPP, state.prv); + s = set_field(s, MSTATUS_MIE, 0); + s = set_field(s, MSTATUS_MPV, curr_virt); + s = set_field(s, MSTATUS_GVA, t.has_gva()); + state.mstatus->write(s); + set_privilege(PRV_M); + } +} + +void processor_t::disasm(insn_t insn) +{ + uint64_t bits = insn.bits() & ((1ULL << (8 * insn_length(insn.bits()))) - 1); + if (last_pc != state.pc || last_bits != bits) { + std::stringstream s; // first put everything in a string, later send it to output + +#ifdef RISCV_ENABLE_COMMITLOG + const char* sym = get_symbol(state.pc); + if (sym != nullptr) + { + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": >>>> " << sym << std::endl; + } +#endif + + if (executions != 1) { + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << ": Executed " << executions << " times" << std::endl; + } + + unsigned max_xlen = isa->get_max_xlen(); + + s << "core " << std::dec << std::setfill(' ') << std::setw(3) << id + << std::hex << ": 0x" << std::setfill('0') << std::setw(max_xlen / 4) + << zext(state.pc, max_xlen) << " (0x" << std::setw(8) << bits << ") " + << disassembler->disassemble(insn) << std::endl; + + debug_output_log(&s); + + last_pc = state.pc; + last_bits = bits; + executions = 1; + } else { + executions++; + } +} + +int processor_t::paddr_bits() +{ + unsigned max_xlen = isa->get_max_xlen(); + assert(xlen == max_xlen); + return max_xlen == 64 ? 50 : 34; +} + +void processor_t::put_csr(int which, reg_t val) +{ + val = zext_xlen(val); + auto search = state.csrmap.find(which); + if (search != state.csrmap.end()) { + search->second->write(val); + return; + } +} + +// Note that get_csr is sometimes called when read side-effects should not +// be actioned. In other words, Spike cannot currently support CSRs with +// side effects on reads. +reg_t processor_t::get_csr(int which, insn_t insn, bool write, bool peek) +{ + auto search = state.csrmap.find(which); + if (search != state.csrmap.end()) { + if (!peek) + search->second->verify_permissions(insn, write); + return search->second->read(); + } + // If we get here, the CSR doesn't exist. Unimplemented CSRs always throw + // illegal-instruction exceptions, not virtual-instruction exceptions. 
+ throw trap_illegal_instruction(insn.bits()); +} + +reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc) +{ + throw trap_illegal_instruction(insn.bits()); +} + +insn_func_t processor_t::decode_insn(insn_t insn) +{ + // look up opcode in hash table + size_t idx = insn.bits() % OPCODE_CACHE_SIZE; + insn_desc_t desc = opcode_cache[idx]; + + bool rve = extension_enabled('E'); + + if (unlikely(insn.bits() != desc.match || !desc.func(xlen, rve))) { + // fall back to linear search + int cnt = 0; + insn_desc_t* p = &instructions[0]; + while ((insn.bits() & p->mask) != p->match || !desc.func(xlen, rve)) + p++, cnt++; + desc = *p; + + if (p->mask != 0 && p > &instructions[0]) { + if (p->match != (p - 1)->match && p->match != (p + 1)->match) { + // move to front of opcode list to reduce miss penalty + while (--p >= &instructions[0]) + *(p + 1) = *p; + instructions[0] = desc; + } + } + + opcode_cache[idx] = desc; + opcode_cache[idx].match = insn.bits(); + } + + return desc.func(xlen, rve); +} + +void processor_t::register_insn(insn_desc_t desc) +{ + instructions.push_back(desc); +} + +void processor_t::build_opcode_map() +{ + struct cmp { + bool operator()(const insn_desc_t& lhs, const insn_desc_t& rhs) { + if (lhs.match == rhs.match) + return lhs.mask > rhs.mask; + return lhs.match > rhs.match; + } + }; + std::sort(instructions.begin(), instructions.end(), cmp()); + + for (size_t i = 0; i < OPCODE_CACHE_SIZE; i++) + opcode_cache[i] = insn_desc_t::illegal(); +} + +void processor_t::register_extension(extension_t* x) +{ + for (auto insn : x->get_instructions()) + register_insn(insn); + build_opcode_map(); + + for (auto disasm_insn : x->get_disasms()) + disassembler->add_insn(disasm_insn); + + if (!custom_extensions.insert(std::make_pair(x->name(), x)).second) { + fprintf(stderr, "extensions must have unique names (got two named \"%s\"!)\n", x->name()); + abort(); + } + x->set_processor(this); +} + +void processor_t::register_base_instructions() +{ + #define DECLARE_INSN(name, match, mask) \ + insn_bits_t name##_match = (match), name##_mask = (mask); \ + bool name##_supported = true; + + #include "encoding.h" + #undef DECLARE_INSN + + #define DECLARE_OVERLAP_INSN(name, ext) { name##_supported &= isa->extension_enabled(ext); } + #include "overlap_list.h" + #undef DECLARE_OVERLAP_INSN + + #define DEFINE_INSN(name) \ + extern reg_t rv32i_##name(processor_t*, insn_t, reg_t); \ + extern reg_t rv64i_##name(processor_t*, insn_t, reg_t); \ + extern reg_t rv32e_##name(processor_t*, insn_t, reg_t); \ + extern reg_t rv64e_##name(processor_t*, insn_t, reg_t); \ + register_insn((insn_desc_t) { \ + name##_supported, \ + name##_match, \ + name##_mask, \ + rv32i_##name, \ + rv64i_##name, \ + rv32e_##name, \ + rv64e_##name}); + #include "insn_list.h" + #undef DEFINE_INSN + + // terminate instruction list with a catch-all + register_insn(insn_desc_t::illegal()); + + build_opcode_map(); +} + +bool processor_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + switch (addr) + { + case 0: + if (len <= 4) { + memset(bytes, 0, len); + bytes[0] = get_field(state.mip->read(), MIP_MSIP); + return true; + } + break; + } + + return false; +} + +bool processor_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + switch (addr) + { + case 0: + if (len <= 4) { + state.mip->write_with_mask(MIP_MSIP, bytes[0] << IRQ_M_SOFT); + return true; + } + break; + } + + return false; +} + +void processor_t::trigger_updated(const std::vector &triggers) +{ + mmu->flush_tlb(); + mmu->check_triggers_fetch = false; + 
mmu->check_triggers_load = false; + mmu->check_triggers_store = false; + + for (auto trigger : triggers) { + if (trigger->execute()) { + mmu->check_triggers_fetch = true; + } + if (trigger->load()) { + mmu->check_triggers_load = true; + } + if (trigger->store()) { + mmu->check_triggers_store = true; + } + } +} diff --git a/vendor/riscv-isa-sim/riscv/processor.h b/vendor/riscv-isa-sim/riscv/processor.h new file mode 100644 index 00000000..96fdc54c --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/processor.h @@ -0,0 +1,469 @@ +// See LICENSE for license details. +#ifndef _RISCV_PROCESSOR_H +#define _RISCV_PROCESSOR_H + +#include "decode.h" +#include "config.h" +#include "trap.h" +#include "abstract_device.h" +#include +#include +#include +#include +#include +#include "debug_rom_defines.h" +#include "entropy_source.h" +#include "csrs.h" +#include "isa_parser.h" +#include "triggers.h" + +class processor_t; +class mmu_t; +typedef reg_t (*insn_func_t)(processor_t*, insn_t, reg_t); +class simif_t; +class trap_t; +class extension_t; +class disassembler_t; + +reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc); + +struct insn_desc_t +{ + bool supported; + insn_bits_t match; + insn_bits_t mask; + insn_func_t rv32i; + insn_func_t rv64i; + insn_func_t rv32e; + insn_func_t rv64e; + + insn_func_t func(int xlen, bool rve) + { + if (!supported) + return NULL; + + if (rve) + return xlen == 64 ? rv64e : rv32e; + else + return xlen == 64 ? rv64i : rv32i; + } + + static insn_desc_t illegal() + { + return {true, 0, 0, &illegal_instruction, &illegal_instruction, &illegal_instruction, &illegal_instruction}; + } +}; + +// regnum, data +typedef std::unordered_map commit_log_reg_t; + +// addr, value, size +typedef std::vector> commit_log_mem_t; + +enum VRM{ + RNU = 0, + RNE, + RDN, + ROD, + INVALID_RM +}; + +template +struct type_usew_t; + +template<> +struct type_usew_t<8> +{ + using type=uint8_t; +}; + +template<> +struct type_usew_t<16> +{ + using type=uint16_t; +}; + +template<> +struct type_usew_t<32> +{ + using type=uint32_t; +}; + +template<> +struct type_usew_t<64> +{ + using type=uint64_t; +}; + +template +struct type_sew_t; + +template<> +struct type_sew_t<8> +{ + using type=int8_t; +}; + +template<> +struct type_sew_t<16> +{ + using type=int16_t; +}; + +template<> +struct type_sew_t<32> +{ + using type=int32_t; +}; + +template<> +struct type_sew_t<64> +{ + using type=int64_t; +}; + + +// architectural state of a RISC-V hart +struct state_t +{ + void reset(processor_t* const proc, reg_t max_isa); + + reg_t pc; + regfile_t XPR; + regfile_t FPR; + + // control and status registers + std::unordered_map csrmap; + reg_t prv; // TODO: Can this be an enum instead? 
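+  // Virtualization mode (the hypervisor extension's V bit): true while the
+  // hart executes in VS- or VU-mode, false otherwise.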
+ bool v; + misa_csr_t_p misa; + mstatus_csr_t_p mstatus; + csr_t_p mepc; + csr_t_p mtval; + csr_t_p mtvec; + csr_t_p mcause; + wide_counter_csr_t_p minstret; + wide_counter_csr_t_p mcycle; + mie_csr_t_p mie; + mip_csr_t_p mip; + csr_t_p medeleg; + csr_t_p mideleg; + csr_t_p mcounteren; + csr_t_p scounteren; + csr_t_p sepc; + csr_t_p stval; + csr_t_p stvec; + virtualized_csr_t_p satp; + csr_t_p scause; + + csr_t_p mtval2; + csr_t_p mtinst; + csr_t_p hstatus; + csr_t_p hideleg; + csr_t_p hedeleg; + csr_t_p hcounteren; + csr_t_p htval; + csr_t_p htinst; + csr_t_p hgatp; + sstatus_csr_t_p sstatus; + vsstatus_csr_t_p vsstatus; + csr_t_p vstvec; + csr_t_p vsepc; + csr_t_p vscause; + csr_t_p vstval; + csr_t_p vsatp; + + csr_t_p dpc; + dcsr_csr_t_p dcsr; + csr_t_p tselect; + tdata2_csr_t_p tdata2; + bool debug_mode; + + mseccfg_csr_t_p mseccfg; + + static const int max_pmp = 16; + pmpaddr_csr_t_p pmpaddr[max_pmp]; + + float_csr_t_p fflags; + float_csr_t_p frm; + + csr_t_p menvcfg; + csr_t_p senvcfg; + csr_t_p henvcfg; + + bool serialized; // whether timer CSRs are in a well-defined state + + // When true, execute a single instruction and then enter debug mode. This + // can only be set by executing dret. + enum { + STEP_NONE, + STEP_STEPPING, + STEP_STEPPED + } single_step; + +#ifdef RISCV_ENABLE_COMMITLOG + commit_log_reg_t log_reg_write; + commit_log_mem_t log_mem_read; + commit_log_mem_t log_mem_write; + reg_t last_inst_priv; + int last_inst_xlen; + int last_inst_flen; +#endif +}; + +typedef enum { + OPERATION_EXECUTE, + OPERATION_STORE, + OPERATION_LOAD, +} trigger_operation_t; + +// Count number of contiguous 1 bits starting from the LSB. +static int cto(reg_t val) +{ + int res = 0; + while ((val & 1) == 1) + val >>= 1, res++; + return res; +} + +// this class represents one processor in a RISC-V machine. +class processor_t : public abstract_device_t +{ +public: + processor_t(const isa_parser_t *isa, const char* varch, + simif_t* sim, uint32_t id, bool halt_on_reset, + FILE *log_file, std::ostream& sout_); // because of command line option --log and -s we need both + ~processor_t(); + + const isa_parser_t &get_isa() { return *isa; } + + void set_debug(bool value); + void set_histogram(bool value); +#ifdef RISCV_ENABLE_COMMITLOG + void enable_log_commits(); + bool get_log_commits_enabled() const { return log_commits_enabled; } +#endif + void reset(); + void step(size_t n); // run for n cycles + void put_csr(int which, reg_t val); + uint32_t get_id() const { return id; } + reg_t get_csr(int which, insn_t insn, bool write, bool peek = 0); + reg_t get_csr(int which) { return get_csr(which, insn_t(0), false, true); } + mmu_t* get_mmu() { return mmu; } + state_t* get_state() { return &state; } + unsigned get_xlen() const { return xlen; } + unsigned get_const_xlen() const { + // Any code that assumes a const xlen should use this method to + // document that assumption. If Spike ever changes to allow + // variable xlen, this method should be removed. + return xlen; + } + unsigned get_flen() const { + return extension_enabled('Q') ? 128 : + extension_enabled('D') ? 64 : + extension_enabled('F') ? 32 : 0; + } + extension_t* get_extension(); + extension_t* get_extension(const char* name); + bool any_custom_extensions() const { + return !custom_extensions.empty(); + } + bool extension_enabled(unsigned char ext) const { + if (ext >= 'A' && ext <= 'Z') + return state.misa->extension_enabled(ext); + else + return isa->extension_enabled(ext); + } + // Is this extension enabled? 
and abort if this extension can + // possibly be disabled dynamically. Useful for documenting + // assumptions about writable misa bits. + bool extension_enabled_const(unsigned char ext) const { + if (ext >= 'A' && ext <= 'Z') + return state.misa->extension_enabled_const(ext); + else + return isa->extension_enabled(ext); // assume this can't change + } + void set_impl(uint8_t impl, bool val) { impl_table[impl] = val; } + bool supports_impl(uint8_t impl) const { + return impl_table[impl]; + } + reg_t pc_alignment_mask() { + return ~(reg_t)(extension_enabled('C') ? 0 : 2); + } + void check_pc_alignment(reg_t pc) { + if (unlikely(pc & ~pc_alignment_mask())) + throw trap_instruction_address_misaligned(state.v, pc, 0, 0); + } + reg_t legalize_privilege(reg_t); + void set_privilege(reg_t); + void set_virt(bool); + void update_histogram(reg_t pc); + const disassembler_t* get_disassembler() { return disassembler; } + + FILE *get_log_file() { return log_file; } + + void register_insn(insn_desc_t); + void register_extension(extension_t*); + + // MMIO slave interface + bool load(reg_t addr, size_t len, uint8_t* bytes); + bool store(reg_t addr, size_t len, const uint8_t* bytes); + + // When true, display disassembly of each instruction that's executed. + bool debug; + // When true, take the slow simulation path. + bool slow_path(); + bool halted() { return state.debug_mode; } + enum { + HR_NONE, /* Halt request is inactive. */ + HR_REGULAR, /* Regular halt request/debug interrupt. */ + HR_GROUP /* Halt requested due to halt group. */ + } halt_request; + + void trigger_updated(const std::vector &triggers); + + void set_pmp_num(reg_t pmp_num); + void set_pmp_granularity(reg_t pmp_granularity); + void set_mmu_capability(int cap); + + const char* get_symbol(uint64_t addr); + +private: + const isa_parser_t * const isa; + + simif_t* sim; + mmu_t* mmu; // main memory is always accessed via the mmu + std::unordered_map custom_extensions; + disassembler_t* disassembler; + state_t state; + uint32_t id; + unsigned xlen; + bool histogram_enabled; + bool log_commits_enabled; + FILE *log_file; + std::ostream sout_; // needed for socket command interface -s, also used for -d and -l, but not for --log + bool halt_on_reset; + std::vector impl_table; + + std::vector instructions; + std::map pc_histogram; + + static const size_t OPCODE_CACHE_SIZE = 8191; + insn_desc_t opcode_cache[OPCODE_CACHE_SIZE]; + + void take_pending_interrupt() { take_interrupt(state.mip->read() & state.mie->read()); } + void take_interrupt(reg_t mask); // take first enabled interrupt in mask + void take_trap(trap_t& t, reg_t epc); // take an exception + void disasm(insn_t insn); // disassemble and print an instruction + int paddr_bits(); + + void enter_debug_mode(uint8_t cause); + + void debug_output_log(std::stringstream *s); // either output to interactive user or write to log file + + friend class mmu_t; + friend class clint_t; + friend class extension_t; + + void parse_varch_string(const char*); + void parse_priv_string(const char*); + void build_opcode_map(); + void register_base_instructions(); + insn_func_t decode_insn(insn_t insn); + + // Track repeated executions for processor_t::disasm() + uint64_t last_pc, last_bits, executions; +public: + entropy_source es; // Crypto ISE Entropy source. 
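+
+  // The PMP parameters below (n_pmp, lg_pmp_granularity) are normally
+  // configured from the device tree via set_pmp_num() and
+  // set_pmp_granularity().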
+ + reg_t n_pmp; + reg_t lg_pmp_granularity; + reg_t pmp_tor_mask() { return -(reg_t(1) << (lg_pmp_granularity - PMP_SHIFT)); } + + class vectorUnit_t { + public: + processor_t* p; + void *reg_file; + char reg_referenced[NVPR]; + int setvl_count; + reg_t vlmax; + reg_t vlenb; + csr_t_p vxsat; + vector_csr_t_p vxrm, vstart, vl, vtype; + reg_t vma, vta; + reg_t vsew; + float vflmul; + reg_t ELEN, VLEN; + bool vill; + bool vstart_alu; + + // vector element for varies SEW + template + T& elt(reg_t vReg, reg_t n, bool is_write = false){ + assert(vsew != 0); + assert((VLEN >> 3)/sizeof(T) > 0); + reg_t elts_per_reg = (VLEN >> 3) / (sizeof(T)); + vReg += n / elts_per_reg; + n = n % elts_per_reg; +#ifdef WORDS_BIGENDIAN + // "V" spec 0.7.1 requires lower indices to map to lower significant + // bits when changing SEW, thus we need to index from the end on BE. + n ^= elts_per_reg - 1; +#endif + reg_referenced[vReg] = 1; + +#ifdef RISCV_ENABLE_COMMITLOG + if (is_write) + p->get_state()->log_reg_write[((vReg) << 4) | 2] = {0, 0}; +#endif + + T *regStart = (T*)((char*)reg_file + vReg * (VLEN >> 3)); + return regStart[n]; + } + public: + + void reset(); + + vectorUnit_t(): + p(0), + reg_file(0), + reg_referenced{0}, + setvl_count(0), + vlmax(0), + vlenb(0), + vxsat(0), + vxrm(0), + vstart(0), + vl(0), + vtype(0), + vma(0), + vta(0), + vsew(0), + vflmul(0), + ELEN(0), + VLEN(0), + vill(false), + vstart_alu(false) { + } + + ~vectorUnit_t(){ + free(reg_file); + reg_file = 0; + } + + reg_t set_vl(int rd, int rs1, reg_t reqVL, reg_t newType); + + reg_t get_vlen() { return VLEN; } + reg_t get_elen() { return ELEN; } + reg_t get_slen() { return VLEN; } + + VRM get_vround_mode() { + return (VRM)(vxrm->read()); + } + }; + + vectorUnit_t VU; + triggers::module_t TM; +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/remote_bitbang.cc b/vendor/riscv-isa-sim/riscv/remote_bitbang.cc new file mode 100644 index 00000000..8453e85a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/remote_bitbang.cc @@ -0,0 +1,187 @@ +#include +#include +#include +#include +#include +#include + +#ifndef AF_INET +#include +#endif +#ifndef INADDR_ANY +#include +#endif + +#include +#include +#include + +#include "remote_bitbang.h" + +#if 1 +# define D(x) x +#else +# define D(x) +#endif + +/////////// remote_bitbang_t + +remote_bitbang_t::remote_bitbang_t(uint16_t port, jtag_dtm_t *tap) : + tap(tap), + socket_fd(0), + client_fd(0), + recv_start(0), + recv_end(0) +{ + socket_fd = socket(AF_INET, SOCK_STREAM, 0); + if (socket_fd == -1) { + fprintf(stderr, "remote_bitbang failed to make socket: %s (%d)\n", + strerror(errno), errno); + abort(); + } + + fcntl(socket_fd, F_SETFL, O_NONBLOCK); + int reuseaddr = 1; + if (setsockopt(socket_fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, + sizeof(int)) == -1) { + fprintf(stderr, "remote_bitbang failed setsockopt: %s (%d)\n", + strerror(errno), errno); + abort(); + } + + struct sockaddr_in addr; + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = INADDR_ANY; + addr.sin_port = htons(port); + + if (bind(socket_fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) { + fprintf(stderr, "remote_bitbang failed to bind socket: %s (%d)\n", + strerror(errno), errno); + abort(); + } + + if (listen(socket_fd, 1) == -1) { + fprintf(stderr, "remote_bitbang failed to listen on socket: %s (%d)\n", + strerror(errno), errno); + abort(); + } + + socklen_t addrlen = sizeof(addr); + if (getsockname(socket_fd, (struct sockaddr *) &addr, &addrlen) == -1) { + fprintf(stderr, "remote_bitbang 
getsockname failed: %s (%d)\n", + strerror(errno), errno); + abort(); + } + + printf("Listening for remote bitbang connection on port %d.\n", + ntohs(addr.sin_port)); + fflush(stdout); +} + +void remote_bitbang_t::accept() +{ + client_fd = ::accept(socket_fd, NULL, NULL); + if (client_fd == -1) { + if (errno == EAGAIN) { + // No client waiting to connect right now. + } else { + fprintf(stderr, "failed to accept on socket: %s (%d)\n", strerror(errno), + errno); + abort(); + } + } else { + fcntl(client_fd, F_SETFL, O_NONBLOCK); + } +} + +void remote_bitbang_t::tick() +{ + if (client_fd > 0) { + execute_commands(); + } else { + this->accept(); + } +} + +void remote_bitbang_t::execute_commands() +{ + static char send_buf[buf_size]; + unsigned total_processed = 0; + bool quit = false; + bool in_rti = tap->state() == RUN_TEST_IDLE; + bool entered_rti = false; + while (1) { + if (recv_start < recv_end) { + unsigned send_offset = 0; + while (recv_start < recv_end) { + uint8_t command = recv_buf[recv_start]; + + switch (command) { + case 'B': /* fprintf(stderr, "*BLINK*\n"); */ break; + case 'b': /* fprintf(stderr, "_______\n"); */ break; + case 'r': tap->reset(); break; + case '0': tap->set_pins(0, 0, 0); break; + case '1': tap->set_pins(0, 0, 1); break; + case '2': tap->set_pins(0, 1, 0); break; + case '3': tap->set_pins(0, 1, 1); break; + case '4': tap->set_pins(1, 0, 0); break; + case '5': tap->set_pins(1, 0, 1); break; + case '6': tap->set_pins(1, 1, 0); break; + case '7': tap->set_pins(1, 1, 1); break; + case 'R': send_buf[send_offset++] = tap->tdo() ? '1' : '0'; break; + case 'Q': quit = true; break; + default: + fprintf(stderr, "remote_bitbang got unsupported command '%c'\n", + command); + } + recv_start++; + total_processed++; + if (!in_rti && tap->state() == RUN_TEST_IDLE) { + entered_rti = true; + break; + } + in_rti = false; + } + unsigned sent = 0; + while (sent < send_offset) { + ssize_t bytes = write(client_fd, send_buf + sent, send_offset); + if (bytes == -1) { + fprintf(stderr, "failed to write to socket: %s (%d)\n", strerror(errno), errno); + abort(); + } + sent += bytes; + } + } + + if (total_processed > buf_size || quit || entered_rti) { + // Don't go forever, because that could starve the main simulation. + break; + } + + recv_start = 0; + recv_end = read(client_fd, recv_buf, buf_size); + + if (recv_end == -1) { + if (errno == EAGAIN) { + break; + } else { + fprintf(stderr, "remote_bitbang failed to read on socket: %s (%d)\n", + strerror(errno), errno); + abort(); + } + } + + if (quit) { + fprintf(stderr, "Remote Bitbang received 'Q'\n"); + } + + if (recv_end == 0 || quit) { + // The remote disconnected. + fprintf(stderr, "Received nothing. Quitting.\n"); + close(client_fd); + client_fd = 0; + break; + } + } +} diff --git a/vendor/riscv-isa-sim/riscv/remote_bitbang.h b/vendor/riscv-isa-sim/riscv/remote_bitbang.h new file mode 100644 index 00000000..1db4d550 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/remote_bitbang.h @@ -0,0 +1,34 @@ +#ifndef REMOTE_BITBANG_H +#define REMOTE_BITBANG_H + +#include + +#include "jtag_dtm.h" + +class remote_bitbang_t +{ +public: + // Create a new server, listening for connections from localhost on the given + // port. + remote_bitbang_t(uint16_t port, jtag_dtm_t *tap); + + // Do a bit of work. + void tick(); + +private: + jtag_dtm_t *tap; + + int socket_fd; + int client_fd; + + static const ssize_t buf_size = 64 * 1024; + char recv_buf[buf_size]; + ssize_t recv_start, recv_end; + + // Check for a client connecting, and accept if there is one. 
+ void accept(); + // Execute any commands the client has for us. + void execute_commands(); +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/riscv.ac b/vendor/riscv-isa-sim/riscv/riscv.ac new file mode 100644 index 00000000..9d14335d --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/riscv.ac @@ -0,0 +1,65 @@ +AC_LANG_CPLUSPLUS + +AX_BOOST_BASE([1.53]) +AX_BOOST_ASIO +AX_BOOST_REGEX + +AC_CHECK_LIB([boost_system], [main], [], []) + +AC_CHECK_LIB([boost_regex], [main], [], []) + +AC_ARG_WITH(isa, + [AS_HELP_STRING([--with-isa=RV64IMAFDC], + [Sets the default RISC-V ISA])], + AC_DEFINE_UNQUOTED([DEFAULT_ISA], "$withval", [Default value for --isa switch]), + AC_DEFINE_UNQUOTED([DEFAULT_ISA], "RV64IMAFDC", [Default value for --isa switch])) + +AC_ARG_WITH(priv, + [AS_HELP_STRING([--with-priv=MSU], + [Sets the default RISC-V privilege modes supported])], + AC_DEFINE_UNQUOTED([DEFAULT_PRIV], "$withval", [Default value for --priv switch]), + AC_DEFINE_UNQUOTED([DEFAULT_PRIV], "MSU", [Default value for --priv switch])) + +AC_ARG_WITH(varch, + [AS_HELP_STRING([--with-varch=vlen:128,elen:64], + [Sets the default vector config])], + AC_DEFINE_UNQUOTED([DEFAULT_VARCH], "$withval", [Default value for --varch switch]), + AC_DEFINE_UNQUOTED([DEFAULT_VARCH], ["vlen:128,elen:64"], [Default value for --varch switch])) + +AC_ARG_WITH(target, + [AS_HELP_STRING([--with-target=riscv64-unknown-elf], + [Sets the default target config])], + AC_DEFINE_UNQUOTED([TARGET_ARCH], "$withval", [Default value for --target switch]), + AC_DEFINE_UNQUOTED([TARGET_ARCH], ["riscv64-unknown-elf"], [Default value for --target switch])) + +AC_SEARCH_LIBS([dlopen], [dl dld], [ + AC_DEFINE([HAVE_DLOPEN], [], [Dynamic library loading is supported]) + AC_SUBST([HAVE_DLOPEN], [yes]) +]) + +AC_CHECK_LIB(pthread, pthread_create, [], [AC_MSG_ERROR([libpthread is required])]) + +AC_ARG_ENABLE([commitlog], AS_HELP_STRING([--enable-commitlog], [Enable commit log generation])) +AS_IF([test "x$enable_commitlog" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_COMMITLOG],,[Enable commit log generation]) +]) + +AC_ARG_ENABLE([histogram], AS_HELP_STRING([--enable-histogram], [Enable PC histogram generation])) +AS_IF([test "x$enable_histogram" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_HISTOGRAM],,[Enable PC histogram generation]) +]) + +AC_ARG_ENABLE([dirty], AS_HELP_STRING([--enable-dirty], [Enable hardware management of PTE accessed and dirty bits])) +AS_IF([test "x$enable_dirty" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_DIRTY],,[Enable hardware management of PTE accessed and dirty bits]) +]) + +AC_ARG_ENABLE([misaligned], AS_HELP_STRING([--enable-misaligned], [Enable hardware support for misaligned loads and stores])) +AS_IF([test "x$enable_misaligned" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_MISALIGNED],,[Enable hardware support for misaligned loads and stores]) +]) + +AC_ARG_ENABLE([dual-endian], AS_HELP_STRING([--enable-dual-endian], [Enable support for running target in either endianness])) +AS_IF([test "x$enable_dual_endian" = "xyes"], [ + AC_DEFINE([RISCV_ENABLE_DUAL_ENDIAN],,[Enable support for running target in either endianness]) +]) diff --git a/vendor/riscv-isa-sim/riscv/riscv.mk.in b/vendor/riscv-isa-sim/riscv/riscv.mk.in new file mode 100644 index 00000000..0c6b977f --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/riscv.mk.in @@ -0,0 +1,1301 @@ +get_insn_list = $(shell grep ^DECLARE_INSN $(1) | sed 's/DECLARE_INSN(\(.*\),.*,.*)/\1/') +get_opcode = $(shell grep ^DECLARE_INSN.*\\\<$(2)\\\> $(1) | sed 's/DECLARE_INSN(.*,\(.*\),.*)/\1/') + 
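+# get_insn_list (above) extracts every instruction name declared with
+# DECLARE_INSN() in the given header; get_opcode (above) returns the match
+# (encoding) value for a single named instruction from that header.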
+riscv_subproject_deps = \ + fdt \ + softfloat \ + +riscv_install_prog_srcs = \ + +riscv_hdrs = \ + abstract_device.h \ + common.h \ + decode.h \ + devices.h \ + dts.h \ + mmu.h \ + cfg.h \ + processor.h \ + sim.h \ + simif.h \ + trap.h \ + encoding.h \ + cachesim.h \ + memtracer.h \ + mmio_plugin.h \ + tracer.h \ + extension.h \ + rocc.h \ + insn_template.h \ + debug_module.h \ + debug_rom_defines.h \ + remote_bitbang.h \ + jtag_dtm.h \ + csrs.h \ + triggers.h \ + +riscv_install_hdrs = mmio_plugin.h + +riscv_precompiled_hdrs = \ + insn_template.h \ + +riscv_srcs = \ + isa_parser.cc \ + processor.cc \ + execute.cc \ + dts.cc \ + sim.cc \ + interactive.cc \ + cachesim.cc \ + mmu.cc \ + extension.cc \ + extensions.cc \ + rocc.cc \ + devices.cc \ + rom.cc \ + clint.cc \ + debug_module.cc \ + remote_bitbang.cc \ + jtag_dtm.cc \ + csrs.cc \ + triggers.cc \ + $(riscv_gen_srcs) \ + +riscv_test_srcs = + +riscv_gen_hdrs = \ + insn_list.h \ + + +riscv_insn_ext_i = \ + add \ + addi \ + addiw \ + addw \ + and \ + andi \ + auipc \ + beq \ + bge \ + bgeu \ + blt \ + bltu \ + bne \ + jal \ + jalr \ + lb \ + lbu \ + ld \ + lh \ + lhu \ + lui \ + lw \ + lwu \ + or \ + ori \ + sb \ + sd \ + sh \ + sll \ + slli \ + slliw \ + sllw \ + slt \ + slti \ + sltiu \ + sltu \ + sra \ + srai \ + sraiw \ + sraw \ + srl \ + srli \ + srliw \ + srlw \ + sub \ + subw \ + sw \ + xor \ + xori \ + fence \ + fence_i \ + +riscv_insn_ext_a = \ + amoadd_d \ + amoadd_w \ + amoand_d \ + amoand_w \ + amomax_d \ + amomaxu_d \ + amomaxu_w \ + amomax_w \ + amomin_d \ + amominu_d \ + amominu_w \ + amomin_w \ + amoor_d \ + amoor_w \ + amoswap_d \ + amoswap_w \ + amoxor_d \ + amoxor_w \ + lr_d \ + lr_w \ + sc_d \ + sc_w \ + +riscv_insn_ext_c = \ + c_add \ + c_addi \ + c_addi4spn \ + c_addw \ + c_and \ + c_andi \ + c_beqz \ + c_bnez \ + c_ebreak \ + c_fld \ + c_fldsp \ + c_flw \ + c_flwsp \ + c_fsd \ + c_fsdsp \ + c_fsw \ + c_fswsp \ + c_j \ + c_jal \ + c_jalr \ + c_jr \ + c_li \ + c_lui \ + c_lw \ + c_lwsp \ + c_mv \ + c_or \ + c_slli \ + c_srai \ + c_srli \ + c_sub \ + c_subw \ + c_sw \ + c_swsp \ + c_xor \ + +riscv_insn_ext_m = \ + div \ + divu \ + divuw \ + divw \ + mul \ + mulh \ + mulhsu \ + mulhu \ + mulw \ + rem \ + remu \ + remuw \ + remw \ + +riscv_insn_ext_f = \ + fadd_s \ + fclass_s \ + fcvt_l_s \ + fcvt_lu_s \ + fcvt_s_l \ + fcvt_s_lu \ + fcvt_s_w \ + fcvt_s_wu \ + fcvt_w_s \ + fcvt_wu_s \ + fdiv_s \ + feq_s \ + fle_s \ + flt_s \ + flw \ + fmadd_s \ + fmax_s \ + fmin_s \ + fmsub_s \ + fmul_s \ + fmv_w_x \ + fmv_x_w \ + fnmadd_s \ + fnmsub_s \ + fsgnj_s \ + fsgnjn_s \ + fsgnjx_s \ + fsqrt_s \ + fsub_s \ + fsw \ + +riscv_insn_ext_d = \ + fadd_d \ + fclass_d \ + fcvt_d_l \ + fcvt_d_lu \ + fcvt_d_q \ + fcvt_d_s \ + fcvt_d_w \ + fcvt_d_wu \ + fcvt_l_d \ + fcvt_lu_d \ + fcvt_s_d \ + fcvt_w_d \ + fcvt_wu_d \ + fdiv_d \ + feq_d \ + fld \ + fle_d \ + flt_d \ + fmadd_d \ + fmax_d \ + fmin_d \ + fmsub_d \ + fmul_d \ + fmv_d_x \ + fmv_x_d \ + fnmadd_d \ + fnmsub_d \ + fsd \ + fsgnj_d \ + fsgnjn_d \ + fsgnjx_d \ + fsqrt_d \ + fsub_d \ + +riscv_insn_ext_zfh = \ + fadd_h \ + fclass_h \ + fcvt_l_h \ + fcvt_lu_h \ + fcvt_d_h \ + fcvt_h_d \ + fcvt_h_l \ + fcvt_h_lu \ + fcvt_h_q \ + fcvt_h_s \ + fcvt_h_w \ + fcvt_h_wu \ + fcvt_q_h \ + fcvt_s_h \ + fcvt_w_h \ + fcvt_wu_h \ + fdiv_h \ + feq_h \ + fle_h \ + flh \ + flt_h \ + fmadd_h \ + fmax_h \ + fmin_h \ + fmsub_h \ + fmul_h \ + fmv_h_x \ + fmv_x_h \ + fnmadd_h \ + fnmsub_h \ + fsgnj_h \ + fsgnjn_h \ + fsgnjx_h \ + fsh \ + fsqrt_h \ + fsub_h \ + +riscv_insn_ext_q = \ + fadd_q \ + fclass_q \ + 
fcvt_l_q \ + fcvt_lu_q \ + fcvt_q_d \ + fcvt_q_l \ + fcvt_q_lu \ + fcvt_q_s \ + fcvt_q_w \ + fcvt_q_wu \ + fcvt_s_q \ + fcvt_w_q \ + fcvt_wu_q \ + fdiv_q \ + feq_q \ + fle_q \ + flq \ + flt_q \ + fmadd_q \ + fmax_q \ + fmin_q \ + fmsub_q \ + fmul_q \ + fnmadd_q \ + fnmsub_q \ + fsgnj_q \ + fsgnjn_q \ + fsgnjx_q \ + fsq \ + fsqrt_q \ + fsub_q \ + +riscv_insn_ext_b = \ + add_uw \ + andn \ + bdecompress \ + bdecompressw \ + bcompress \ + bcompressw \ + bfp \ + bfpw \ + bmatflip \ + bmator \ + bmatxor \ + sh1add \ + sh1add_uw \ + sh2add \ + sh2add_uw \ + sh3add \ + sh3add_uw \ + clmul \ + clmulh \ + clmulr \ + clz \ + clzw \ + cmix \ + cmov \ + crc32_b \ + crc32c_b \ + crc32c_d \ + crc32c_h \ + crc32c_w \ + crc32_d \ + crc32_h \ + crc32_w \ + ctz \ + ctzw \ + fsl \ + fslw \ + fsr \ + fsri \ + fsriw \ + fsrw \ + gorc \ + gorci \ + gorciw \ + gorcw \ + grev \ + grevi \ + greviw \ + grevw \ + max \ + maxu \ + min \ + minu \ + orn \ + pack \ + packh \ + packu \ + packuw \ + packw \ + cpop \ + cpopw \ + rol \ + rolw \ + ror \ + rori \ + roriw \ + rorw \ + bclr \ + bclri \ + bext \ + bexti \ + binv \ + binvi \ + bset \ + bseti \ + sext_b \ + sext_h \ + shfl \ + shfli \ + shflw \ + slli_uw \ + slo \ + sloi \ + sloiw \ + slow \ + sro \ + sroi \ + sroiw \ + srow \ + unshfl \ + unshfli \ + unshflw \ + xnor \ + xperm4 \ + xperm8 \ + xperm16 \ + xperm32 \ + +# Scalar Crypto ISE +riscv_insn_ext_k = \ + aes32dsi \ + aes32dsmi \ + aes32esi \ + aes32esmi \ + aes64ds \ + aes64dsm \ + aes64es \ + aes64esm \ + aes64ks1i \ + aes64ks2 \ + aes64im \ + sha256sig0 \ + sha256sig1 \ + sha256sum0 \ + sha256sum1 \ + sha512sig0 \ + sha512sig0h \ + sha512sig0l \ + sha512sig1 \ + sha512sig1h \ + sha512sig1l \ + sha512sum0 \ + sha512sum0r \ + sha512sum1 \ + sha512sum1r \ + sm3p0 \ + sm3p1 \ + sm4ed \ + sm4ks + +riscv_insn_ext_v_alu_int = \ + vaadd_vv \ + vaaddu_vv \ + vaadd_vx \ + vaaddu_vx \ + vadc_vim \ + vadc_vvm \ + vadc_vxm \ + vadd_vi \ + vadd_vv \ + vadd_vx \ + vand_vi \ + vand_vv \ + vand_vx \ + vasub_vv \ + vasubu_vv \ + vasub_vx \ + vasubu_vx \ + vcompress_vm \ + vcpop_m \ + vdiv_vv \ + vdiv_vx \ + vdivu_vv \ + vdivu_vx \ + vid_v \ + viota_m \ + vmacc_vv \ + vmacc_vx \ + vmadc_vv \ + vmadc_vx \ + vmadc_vi \ + vmadc_vim \ + vmadc_vvm \ + vmadc_vxm \ + vmadd_vv \ + vmadd_vx \ + vmand_mm \ + vmandn_mm \ + vmax_vv \ + vmax_vx \ + vmaxu_vv \ + vmaxu_vx \ + vmerge_vim \ + vmerge_vvm \ + vmerge_vxm \ + vfirst_m \ + vmin_vv \ + vmin_vx \ + vminu_vv \ + vminu_vx \ + vmnand_mm \ + vmnor_mm \ + vmor_mm \ + vmorn_mm \ + vmsbc_vv \ + vmsbc_vx \ + vmsbc_vvm \ + vmsbc_vxm \ + vmsbf_m \ + vmseq_vi \ + vmseq_vv \ + vmseq_vx \ + vmsgt_vi \ + vmsgt_vx \ + vmsgtu_vi \ + vmsgtu_vx \ + vmsif_m \ + vmsle_vi \ + vmsle_vv \ + vmsle_vx \ + vmsleu_vi \ + vmsleu_vv \ + vmsleu_vx \ + vmslt_vv \ + vmslt_vx \ + vmsltu_vv \ + vmsltu_vx \ + vmsne_vi \ + vmsne_vv \ + vmsne_vx \ + vmsof_m \ + vmul_vv \ + vmul_vx \ + vmulh_vv \ + vmulh_vx \ + vmulhsu_vv \ + vmulhsu_vx \ + vmulhu_vv \ + vmulhu_vx \ + vmv_s_x \ + vmv_v_i \ + vmv_v_v \ + vmv_v_x \ + vmv_x_s \ + vmv1r_v \ + vmv2r_v \ + vmv4r_v \ + vmv8r_v \ + vmxnor_mm \ + vmxor_mm \ + vnclip_wi \ + vnclip_wv \ + vnclip_wx \ + vnclipu_wi \ + vnclipu_wv \ + vnclipu_wx \ + vnmsac_vv \ + vnmsac_vx \ + vnmsub_vv \ + vnmsub_vx \ + vnsra_wi \ + vnsra_wv \ + vnsra_wx \ + vnsrl_wi \ + vnsrl_wv \ + vnsrl_wx \ + vor_vi \ + vor_vv \ + vor_vx \ + vredand_vs \ + vredmax_vs \ + vredmaxu_vs \ + vredmin_vs \ + vredminu_vs \ + vredor_vs \ + vredsum_vs \ + vredxor_vs \ + vrem_vv \ + vrem_vx \ + vremu_vv \ + vremu_vx \ + 
vrgather_vi \ + vrgather_vv \ + vrgather_vx \ + vrgatherei16_vv \ + vrsub_vi \ + vrsub_vx \ + vsadd_vi \ + vsadd_vv \ + vsadd_vx \ + vsaddu_vi \ + vsaddu_vv \ + vsaddu_vx \ + vsbc_vvm \ + vsbc_vxm \ + vsext_vf2 \ + vsext_vf4 \ + vsext_vf8 \ + vslide1down_vx \ + vslide1up_vx \ + vslidedown_vi \ + vslidedown_vx \ + vslideup_vi \ + vslideup_vx \ + vsll_vi \ + vsll_vv \ + vsll_vx \ + vsmul_vv \ + vsmul_vx \ + vsra_vi \ + vsra_vv \ + vsra_vx \ + vsrl_vi \ + vsrl_vv \ + vsrl_vx \ + vssra_vi \ + vssra_vv \ + vssra_vx \ + vssrl_vi \ + vssrl_vv \ + vssrl_vx \ + vssub_vv \ + vssub_vx \ + vssubu_vv \ + vssubu_vx \ + vsub_vv \ + vsub_vx \ + vwadd_vv \ + vwadd_vx \ + vwadd_wv \ + vwadd_wx \ + vwaddu_vv \ + vwaddu_vx \ + vwaddu_wv \ + vwaddu_wx \ + vwmacc_vv \ + vwmacc_vx \ + vwmaccsu_vv \ + vwmaccsu_vx \ + vwmaccu_vv \ + vwmaccu_vx \ + vwmaccus_vx \ + vwmul_vv \ + vwmul_vx \ + vwmulsu_vv \ + vwmulsu_vx \ + vwmulu_vv \ + vwmulu_vx \ + vwredsum_vs \ + vwredsumu_vs \ + vwsub_vv \ + vwsub_vx \ + vwsub_wv \ + vwsub_wx \ + vwsubu_vv \ + vwsubu_vx \ + vwsubu_wv \ + vwsubu_wx \ + vxor_vi \ + vxor_vv \ + vxor_vx \ + vzext_vf2 \ + vzext_vf4 \ + vzext_vf8 \ + +riscv_insn_ext_v_alu_fp = \ + vfadd_vf \ + vfadd_vv \ + vfclass_v \ + vfcvt_f_x_v \ + vfcvt_f_xu_v \ + vfcvt_rtz_x_f_v \ + vfcvt_rtz_xu_f_v \ + vfcvt_x_f_v \ + vfcvt_xu_f_v \ + vfdiv_vf \ + vfdiv_vv \ + vfmacc_vf \ + vfmacc_vv \ + vfmadd_vf \ + vfmadd_vv \ + vfmax_vf \ + vfmax_vv \ + vfmerge_vfm \ + vfmin_vf \ + vfmin_vv \ + vfmsac_vf \ + vfmsac_vv \ + vfmsub_vf \ + vfmsub_vv \ + vfmul_vf \ + vfmul_vv \ + vfmv_f_s \ + vfmv_s_f \ + vfmv_v_f \ + vfncvt_f_f_w \ + vfncvt_f_x_w \ + vfncvt_f_xu_w \ + vfncvt_rod_f_f_w \ + vfncvt_rtz_x_f_w \ + vfncvt_rtz_xu_f_w \ + vfncvt_x_f_w \ + vfncvt_xu_f_w \ + vfnmacc_vf \ + vfnmacc_vv \ + vfnmadd_vf \ + vfnmadd_vv \ + vfnmsac_vf \ + vfnmsac_vv \ + vfnmsub_vf \ + vfnmsub_vv \ + vfrdiv_vf \ + vfredmax_vs \ + vfredmin_vs \ + vfredosum_vs \ + vfredusum_vs \ + vfrec7_v \ + vfrsub_vf \ + vfrsqrt7_v \ + vfsgnj_vf \ + vfsgnj_vv \ + vfsgnjn_vf \ + vfsgnjn_vv \ + vfsgnjx_vf \ + vfsgnjx_vv \ + vfsqrt_v \ + vfslide1down_vf \ + vfslide1up_vf \ + vfsub_vf \ + vfsub_vv \ + vfwadd_vf \ + vfwadd_vv \ + vfwadd_wf \ + vfwadd_wv \ + vfwcvt_f_f_v \ + vfwcvt_f_x_v \ + vfwcvt_f_xu_v \ + vfwcvt_rtz_x_f_v \ + vfwcvt_rtz_xu_f_v \ + vfwcvt_x_f_v \ + vfwcvt_xu_f_v \ + vfwmacc_vf \ + vfwmacc_vv \ + vfwmsac_vf \ + vfwmsac_vv \ + vfwmul_vf \ + vfwmul_vv \ + vfwnmacc_vf \ + vfwnmacc_vv \ + vfwnmsac_vf \ + vfwnmsac_vv \ + vfwredosum_vs \ + vfwredusum_vs \ + vfwsub_vf \ + vfwsub_vv \ + vfwsub_wf \ + vfwsub_wv \ + vmfeq_vf \ + vmfeq_vv \ + vmfge_vf \ + vmfgt_vf \ + vmfle_vf \ + vmfle_vv \ + vmflt_vf \ + vmflt_vv \ + vmfne_vf \ + vmfne_vv \ + +riscv_insn_ext_v_amo = \ + vamoswapei8_v \ + vamoaddei8_v \ + vamoandei8_v \ + vamomaxei8_v \ + vamomaxuei8_v \ + vamominei8_v \ + vamominuei8_v \ + vamoorei8_v \ + vamoxorei8_v \ + vamoswapei16_v \ + vamoaddei16_v \ + vamoandei16_v \ + vamomaxei16_v \ + vamomaxuei16_v \ + vamominei16_v \ + vamominuei16_v \ + vamoorei16_v \ + vamoxorei16_v \ + vamoswapei32_v \ + vamoaddei32_v \ + vamoandei32_v \ + vamomaxei32_v \ + vamomaxuei32_v \ + vamominei32_v \ + vamominuei32_v \ + vamoorei32_v \ + vamoxorei32_v \ + vamoswapei64_v \ + vamoaddei64_v \ + vamoandei64_v \ + vamomaxei64_v \ + vamomaxuei64_v \ + vamominei64_v \ + vamominuei64_v \ + vamoorei64_v \ + vamoxorei64_v \ + +riscv_insn_ext_v_ldst = \ + vlm_v \ + vle8_v \ + vle16_v \ + vle32_v \ + vle64_v \ + vloxei8_v \ + vloxei16_v \ + vloxei32_v \ + vloxei64_v \ + vlse8_v \ + 
vlse16_v \ + vlse32_v \ + vlse64_v \ + vluxei8_v \ + vluxei16_v \ + vluxei32_v \ + vluxei64_v \ + vle8ff_v \ + vle16ff_v \ + vle32ff_v \ + vle64ff_v \ + vl1re8_v \ + vl2re8_v \ + vl4re8_v \ + vl8re8_v \ + vl1re16_v \ + vl2re16_v \ + vl4re16_v \ + vl8re16_v \ + vl1re32_v \ + vl2re32_v \ + vl4re32_v \ + vl8re32_v \ + vl1re64_v \ + vl2re64_v \ + vl4re64_v \ + vl8re64_v \ + vsm_v \ + vse8_v \ + vse16_v \ + vse32_v \ + vse64_v \ + vsse8_v \ + vsoxei8_v \ + vsoxei16_v \ + vsoxei32_v \ + vsoxei64_v \ + vsse16_v \ + vsse32_v \ + vsse64_v \ + vsuxei8_v \ + vsuxei16_v \ + vsuxei32_v \ + vsuxei64_v \ + vs1r_v \ + vs2r_v \ + vs4r_v \ + vs8r_v \ + +riscv_insn_ext_v_ctrl = \ + vsetivli \ + vsetvli \ + vsetvl \ + +riscv_insn_ext_v = \ + $(riscv_insn_ext_v_alu_fp) \ + $(riscv_insn_ext_v_alu_int) \ + $(riscv_insn_ext_v_amo) \ + $(riscv_insn_ext_v_ctrl) \ + $(riscv_insn_ext_v_ldst) \ + +riscv_insn_ext_h = \ + hfence_gvma \ + hfence_vvma \ + hlv_b \ + hlv_bu \ + hlv_h \ + hlv_hu \ + hlvx_hu \ + hlv_w \ + hlv_wu \ + hlvx_wu \ + hlv_d \ + hsv_b \ + hsv_h \ + hsv_w \ + hsv_d \ + +riscv_insn_ext_p_simd = \ + add16 \ + radd16 \ + uradd16 \ + kadd16 \ + ukadd16 \ + sub16 \ + rsub16 \ + ursub16 \ + ksub16 \ + uksub16 \ + cras16 \ + rcras16 \ + urcras16 \ + kcras16 \ + ukcras16 \ + crsa16 \ + rcrsa16 \ + urcrsa16 \ + kcrsa16 \ + ukcrsa16 \ + stas16 \ + rstas16 \ + urstas16 \ + kstas16 \ + ukstas16 \ + stsa16 \ + rstsa16 \ + urstsa16 \ + kstsa16 \ + ukstsa16 \ + add8 \ + radd8 \ + uradd8 \ + kadd8 \ + ukadd8 \ + sub8 \ + rsub8 \ + ursub8 \ + ksub8 \ + uksub8 \ + sra16 \ + srai16 \ + sra16_u \ + srai16_u \ + srl16 \ + srli16 \ + srl16_u \ + srli16_u \ + sll16 \ + slli16 \ + ksll16 \ + kslli16 \ + kslra16 \ + kslra16_u \ + sra8 \ + srai8 \ + sra8_u \ + srai8_u \ + srl8 \ + srli8 \ + srl8_u \ + srli8_u \ + sll8 \ + slli8 \ + ksll8 \ + kslli8 \ + kslra8 \ + kslra8_u \ + cmpeq16 \ + scmplt16 \ + scmple16 \ + ucmplt16 \ + ucmple16 \ + cmpeq8 \ + scmplt8 \ + scmple8 \ + ucmplt8 \ + ucmple8 \ + smul16 \ + smulx16 \ + umul16 \ + umulx16 \ + khm16 \ + khmx16 \ + smul8 \ + smulx8 \ + umul8 \ + umulx8 \ + khm8 \ + khmx8 \ + smin16 \ + umin16 \ + smax16 \ + umax16 \ + sclip16 \ + uclip16 \ + kabs16 \ + clrs16 \ + clz16 \ + smin8 \ + umin8 \ + smax8 \ + umax8 \ + sclip8 \ + uclip8 \ + kabs8 \ + clrs8 \ + clz8 \ + sunpkd810 \ + sunpkd820 \ + sunpkd830 \ + sunpkd831 \ + sunpkd832 \ + zunpkd810 \ + zunpkd820 \ + zunpkd830 \ + zunpkd831 \ + zunpkd832 \ + +riscv_insn_ext_p_partial_simd = \ + pkbb16 \ + pkbt16 \ + pktb16 \ + pktt16 \ + smmul \ + smmul_u \ + kmmac \ + kmmac_u \ + kmmsb \ + kmmsb_u \ + kwmmul \ + kwmmul_u \ + smmwb \ + smmwb_u \ + smmwt \ + smmwt_u \ + kmmawb \ + kmmawb_u \ + kmmawt \ + kmmawt_u \ + kmmwb2 \ + kmmwb2_u \ + kmmwt2 \ + kmmwt2_u \ + kmmawb2 \ + kmmawb2_u \ + kmmawt2 \ + kmmawt2_u \ + smbb16 \ + smbt16 \ + smtt16 \ + kmda \ + kmxda \ + smds \ + smdrs \ + smxds \ + kmabb \ + kmabt \ + kmatt \ + kmada \ + kmaxda \ + kmads \ + kmadrs \ + kmaxds \ + kmsda \ + kmsxda \ + smal \ + sclip32 \ + uclip32 \ + clrs32 \ + pbsad \ + pbsada \ + smaqa \ + umaqa \ + smaqa_su \ + +riscv_insn_ext_p_64_bit_profile = \ + add64 \ + radd64 \ + uradd64 \ + kadd64 \ + ukadd64 \ + sub64 \ + rsub64 \ + ursub64 \ + ksub64 \ + uksub64 \ + smar64 \ + smsr64 \ + umar64 \ + umsr64 \ + kmar64 \ + kmsr64 \ + ukmar64 \ + ukmsr64 \ + smalbb \ + smalbt \ + smaltt \ + smalda \ + smalxda \ + smalds \ + smaldrs \ + smalxds \ + smslda \ + smslxda \ + +riscv_insn_ext_p_non_simd = \ + kaddh \ + ksubh \ + khmbb \ + khmbt \ + khmtt \ + ukaddh \ + uksubh 
\ + kaddw \ + ukaddw \ + ksubw \ + uksubw \ + kdmbb \ + kdmbt \ + kdmtt \ + kslraw \ + kslraw_u \ + ksllw \ + kslliw \ + kdmabb \ + kdmabt \ + kdmatt \ + kabsw \ + raddw \ + uraddw \ + rsubw \ + ursubw \ + mulr64 \ + mulsr64 \ + msubr32 \ + ave \ + sra_u \ + srai_u \ + insb \ + maddr32 \ + +riscv_insn_ext_p_rv64_only = \ + add32 \ + radd32 \ + uradd32 \ + kadd32 \ + ukadd32 \ + sub32 \ + rsub32 \ + ursub32 \ + ksub32 \ + uksub32 \ + cras32 \ + rcras32 \ + urcras32 \ + kcras32 \ + ukcras32 \ + crsa32 \ + rcrsa32 \ + urcrsa32 \ + kcrsa32 \ + ukcrsa32 \ + stas32 \ + rstas32 \ + urstas32 \ + kstas32 \ + ukstas32 \ + stsa32 \ + rstsa32 \ + urstsa32 \ + kstsa32 \ + ukstsa32 \ + sra32 \ + srai32 \ + sra32_u \ + srai32_u \ + srl32 \ + srli32 \ + srl32_u \ + srli32_u \ + sll32 \ + slli32 \ + ksll32 \ + kslli32 \ + kslra32 \ + kslra32_u \ + smin32 \ + umin32 \ + smax32 \ + umax32 \ + kabs32 \ + khmbb16 \ + khmbt16 \ + khmtt16 \ + kdmbb16 \ + kdmbt16 \ + kdmtt16 \ + kdmabb16 \ + kdmabt16 \ + kdmatt16 \ + smbt32 \ + smtt32 \ + kmabb32 \ + kmabt32 \ + kmatt32 \ + kmda32 \ + kmxda32 \ + kmaxda32 \ + kmads32 \ + kmadrs32 \ + kmaxds32 \ + kmsda32 \ + kmsxda32 \ + smds32 \ + smdrs32 \ + smxds32 \ + sraiw_u \ + pkbt32 \ + pktb32 \ + clz32 \ + +riscv_insn_ext_p = \ + $(riscv_insn_ext_p_simd) \ + $(riscv_insn_ext_p_partial_simd) \ + $(riscv_insn_ext_p_64_bit_profile) \ + $(riscv_insn_ext_p_non_simd) \ + $(riscv_insn_ext_p_rv64_only) \ + +riscv_insn_priv = \ + csrrc \ + csrrci \ + csrrs \ + csrrsi \ + csrrw \ + csrrwi \ + dret \ + ebreak \ + ecall \ + mret \ + sfence_vma \ + sret \ + wfi \ + +riscv_insn_svinval = \ + sfence_w_inval \ + sfence_inval_ir \ + sinval_vma \ + hinval_vvma \ + hinval_gvma \ + +riscv_insn_ext_cmo = \ + cbo_clean \ + cbo_flush \ + cbo_inval \ + cbo_zero \ + +riscv_insn_list = \ + $(riscv_insn_ext_a) \ + $(riscv_insn_ext_c) \ + $(riscv_insn_ext_i) \ + $(riscv_insn_ext_m) \ + $(riscv_insn_ext_f) \ + $(riscv_insn_ext_d) \ + $(riscv_insn_ext_zfh) \ + $(riscv_insn_ext_q) \ + $(riscv_insn_ext_b) \ + $(riscv_insn_ext_k) \ + $(if $(HAVE_INT128),$(riscv_insn_ext_v),) \ + $(riscv_insn_ext_h) \ + $(riscv_insn_ext_p) \ + $(riscv_insn_priv) \ + $(riscv_insn_svinval) \ + $(riscv_insn_ext_cmo) \ + +riscv_gen_srcs = \ + $(addsuffix .cc,$(riscv_insn_list)) + +insn_list.h: $(src_dir)/riscv/riscv.mk.in + for insn in $(foreach insn,$(riscv_insn_list),$(subst .,_,$(insn))) ; do \ + printf 'DEFINE_INSN(%s)\n' "$${insn}" ; \ + done > $@.tmp + mv $@.tmp $@ + +$(riscv_gen_srcs): %.cc: insns/%.h insn_template.cc + sed 's/NAME/$(subst .cc,,$@)/' $(src_dir)/riscv/insn_template.cc | sed 's/OPCODE/$(call get_opcode,$(src_dir)/riscv/encoding.h,$(subst .cc,,$@))/' > $@ + +riscv_junk = \ + $(riscv_gen_srcs) \ diff --git a/vendor/riscv-isa-sim/riscv/rocc.cc b/vendor/riscv-isa-sim/riscv/rocc.cc new file mode 100644 index 00000000..2d090952 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/rocc.cc @@ -0,0 +1,46 @@ +// See LICENSE for license details. + +#include "rocc.h" +#include "trap.h" +#include + +#define customX(n) \ + static reg_t c##n(processor_t* p, insn_t insn, reg_t pc) \ + { \ + rocc_t* rocc = static_cast(p->get_extension()); \ + rocc_insn_union_t u; \ + u.i = insn; \ + reg_t xs1 = u.r.xs1 ? RS1 : -1; \ + reg_t xs2 = u.r.xs2 ? 
RS2 : -1; \ + reg_t xd = rocc->custom##n(u.r, xs1, xs2); \ + if (u.r.xd) \ + WRITE_RD(xd); \ + return pc+4; \ + } \ + \ + reg_t rocc_t::custom##n(rocc_insn_t insn, reg_t xs1, reg_t xs2) \ + { \ + illegal_instruction(); \ + return 0; \ + } + +customX(0) +customX(1) +customX(2) +customX(3) + +std::vector rocc_t::get_instructions() +{ + std::vector insns; + insns.push_back((insn_desc_t){true, 0x0b, 0x7f, &::illegal_instruction, c0, &::illegal_instruction, c0}); + insns.push_back((insn_desc_t){true, 0x2b, 0x7f, &::illegal_instruction, c1, &::illegal_instruction, c1}); + insns.push_back((insn_desc_t){true, 0x5b, 0x7f, &::illegal_instruction, c2, &::illegal_instruction, c2}); + insns.push_back((insn_desc_t){true, 0x7b, 0x7f, &::illegal_instruction, c3, &::illegal_instruction, c3}); + return insns; +} + +std::vector rocc_t::get_disasms() +{ + std::vector insns; + return insns; +} diff --git a/vendor/riscv-isa-sim/riscv/rocc.h b/vendor/riscv-isa-sim/riscv/rocc.h new file mode 100644 index 00000000..1a522ab4 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/rocc.h @@ -0,0 +1,61 @@ +#ifndef _RISCV_ROCC_H +#define _RISCV_ROCC_H + +#include "extension.h" + +struct rocc_insn_t +{ + unsigned opcode : 7; + unsigned rd : 5; + unsigned xs2 : 1; + unsigned xs1 : 1; + unsigned xd : 1; + unsigned rs1 : 5; + unsigned rs2 : 5; + unsigned funct : 7; +}; + +union rocc_insn_union_t +{ + rocc_insn_t r; + insn_t i; +}; + +class rocc_t : public extension_t +{ + public: + virtual reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t xs2); + virtual reg_t custom1(rocc_insn_t insn, reg_t xs1, reg_t xs2); + virtual reg_t custom2(rocc_insn_t insn, reg_t xs1, reg_t xs2); + virtual reg_t custom3(rocc_insn_t insn, reg_t xs1, reg_t xs2); + std::vector get_instructions(); + std::vector get_disasms(); +}; + +#define define_custom_func(type_name, ext_name_str, func_name, method_name) \ + static reg_t func_name(processor_t* p, insn_t insn, reg_t pc) \ + { \ + type_name* rocc = static_cast(p->get_extension(ext_name_str)); \ + rocc_insn_union_t u; \ + u.i = insn; \ + reg_t xs1 = u.r.xs1 ? RS1 : -1; \ + reg_t xs2 = u.r.xs2 ? RS2 : -1; \ + reg_t xd = rocc->method_name(u.r, xs1, xs2); \ + if (u.r.xd) \ + WRITE_RD(xd); \ + return pc+4; \ + } \ + +#define push_custom_insn(insn_list, opcode, opcode_mask, func_name_32, func_name_64) \ + insn_list.push_back((insn_desc_t){opcode, opcode_mask, func_name_32, func_name_64}) + +#define ILLEGAL_INSN_FUNC &::illegal_instruction + +#define ROCC_OPCODE0 0x0b +#define ROCC_OPCODE1 0x2b +#define ROCC_OPCODE2 0x5b +#define ROCC_OPCODE3 0x7b + +#define ROCC_OPCODE_MASK 0x7f + +#endif diff --git a/vendor/riscv-isa-sim/riscv/rom.cc b/vendor/riscv-isa-sim/riscv/rom.cc new file mode 100644 index 00000000..b8528621 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/rom.cc @@ -0,0 +1,19 @@ +#include "devices.h" + +rom_device_t::rom_device_t(std::vector data) + : data(data) +{ +} + +bool rom_device_t::load(reg_t addr, size_t len, uint8_t* bytes) +{ + if (addr + len > data.size()) + return false; + memcpy(bytes, &data[addr], len); + return true; +} + +bool rom_device_t::store(reg_t addr, size_t len, const uint8_t* bytes) +{ + return false; +} diff --git a/vendor/riscv-isa-sim/riscv/sim.cc b/vendor/riscv-isa-sim/riscv/sim.cc new file mode 100644 index 00000000..069e1b51 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/sim.cc @@ -0,0 +1,438 @@ +// See LICENSE for license details. 
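Before the simulator core that follows, a minimal sketch of how an accelerator could plug into the rocc_t interface defined above. The class name and the accumulator behaviour are purely illustrative, and the sketch assumes the name() hook and REGISTER_EXTENSION helper provided by extension.h, which is not reproduced in this diff.

// Hypothetical RoCC accelerator: a few accumulator registers driven by custom0.
class dummy_rocc_t : public rocc_t
{
 public:
  const char* name() { return "dummy_rocc"; }            // assumed extension_t hook

  reg_t custom0(rocc_insn_t insn, reg_t xs1, reg_t xs2)
  {
    switch (insn.funct)
    {
      case 0: acc[insn.rs2 % n_acc] = xs1; return 0;     // write accumulator
      case 1: return acc[xs2 % n_acc];                   // read accumulator
      case 2: acc[insn.rs2 % n_acc] += xs1; return 0;    // accumulate into it
      default: illegal_instruction();                    // unknown funct field
    }
    return 0;
  }

 private:
  static const int n_acc = 4;
  reg_t acc[n_acc] = {};
};

// Assumed registration macro from extension.h; it is what makes an extension
// selectable from the command line by name.
REGISTER_EXTENSION(dummy_rocc, []() { return new dummy_rocc_t; })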
+ +#include "sim.h" +#include "mmu.h" +#include "dts.h" +#include "remote_bitbang.h" +#include "byteorder.h" +#include "platform.h" +#include "libfdt.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +volatile bool ctrlc_pressed = false; +static void handle_signal(int sig) +{ + if (ctrlc_pressed) + exit(-1); + ctrlc_pressed = true; + signal(sig, &handle_signal); +} + +sim_t::sim_t(const cfg_t *cfg, bool halted, + std::vector> mems, + std::vector> plugin_devices, + const std::vector& args, + const debug_module_config_t &dm_config, + const char *log_path, + bool dtb_enabled, const char *dtb_file, +#ifdef HAVE_BOOST_ASIO + boost::asio::io_service *io_service_ptr, boost::asio::ip::tcp::acceptor *acceptor_ptr, // option -s +#endif + FILE *cmd_file) // needed for command line option --cmd + : htif_t(args), + isa(cfg->isa(), cfg->priv()), + cfg(cfg), + mems(mems), + plugin_devices(plugin_devices), + procs(std::max(cfg->nprocs(), size_t(1))), + dtb_file(dtb_file ? dtb_file : ""), + dtb_enabled(dtb_enabled), + log_file(log_path), + cmd_file(cmd_file), +#ifdef HAVE_BOOST_ASIO + io_service_ptr(io_service_ptr), // socket interface + acceptor_ptr(acceptor_ptr), +#endif + sout_(nullptr), + current_step(0), + current_proc(0), + debug(false), + histogram_enabled(false), + log(false), + remote_bitbang(NULL), + debug_module(this, dm_config) +{ + signal(SIGINT, &handle_signal); + + sout_.rdbuf(std::cerr.rdbuf()); // debug output goes to stderr by default + + for (auto& x : mems) + bus.add_device(x.first, x.second); + + for (auto& x : plugin_devices) + bus.add_device(x.first, x.second); + + debug_module.add_device(&bus); + + debug_mmu = new mmu_t(this, NULL); + + for (size_t i = 0; i < cfg->nprocs(); i++) { + procs[i] = new processor_t(&isa, cfg->varch(), this, cfg->hartids()[i], halted, + log_file.get(), sout_); + } + + make_dtb(); + + void *fdt = (void *)dtb.c_str(); + + // Only make a CLINT (Core-Local INTerrupt controller) if one is specified in + // the device tree configuration. + // + // This isn't *quite* as general as we could get (because you might have one + // that's not bus-accessible), but it should handle the normal use cases. In + // particular, the default device tree configuration that you get without + // setting the dtb_file argument has one. 
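// For illustration, the kind of node fdt_parse_clint() is looking for; the
// auto-generated DTS from make_dts() emits something along these lines (the
// address and size shown are typical defaults, not guaranteed):
//
//   clint@2000000 {
//     compatible = "riscv,clint0";
//     interrupts-extended = <&CPU0_intc 3 &CPU0_intc 7>;
//     reg = <0x0 0x2000000 0x0 0xc0000>;
//   };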
+ reg_t clint_base; + if (fdt_parse_clint(fdt, &clint_base, "riscv,clint0") == 0) { + clint.reset(new clint_t(procs, CPU_HZ / INSNS_PER_RTC_TICK, cfg->real_time_clint())); + bus.add_device(clint_base, clint.get()); + } + + //per core attribute + int cpu_offset = 0, rc; + size_t cpu_idx = 0; + cpu_offset = fdt_get_offset(fdt, "/cpus"); + if (cpu_offset < 0) + return; + + for (cpu_offset = fdt_get_first_subnode(fdt, cpu_offset); cpu_offset >= 0; + cpu_offset = fdt_get_next_subnode(fdt, cpu_offset)) { + + if (cpu_idx >= nprocs()) + break; + + //handle pmp + reg_t pmp_num = 0, pmp_granularity = 0; + if (fdt_parse_pmp_num(fdt, cpu_offset, &pmp_num) == 0) { + if (pmp_num <= 64) { + procs[cpu_idx]->set_pmp_num(pmp_num); + } else { + std::cerr << "core (" + << cpu_idx + << ") doesn't have valid 'riscv,pmpregions'" + << pmp_num << ").\n"; + exit(1); + } + } else { + procs[cpu_idx]->set_pmp_num(0); + } + + if (fdt_parse_pmp_alignment(fdt, cpu_offset, &pmp_granularity) == 0) { + procs[cpu_idx]->set_pmp_granularity(pmp_granularity); + } + + //handle mmu-type + const char *mmu_type; + rc = fdt_parse_mmu_type(fdt, cpu_offset, &mmu_type); + if (rc == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SBARE); + if (strncmp(mmu_type, "riscv,sv32", strlen("riscv,sv32")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV32); + } else if (strncmp(mmu_type, "riscv,sv39", strlen("riscv,sv39")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV39); + } else if (strncmp(mmu_type, "riscv,sv48", strlen("riscv,sv48")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV48); + } else if (strncmp(mmu_type, "riscv,sv57", strlen("riscv,sv57")) == 0) { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SV57); + } else if (strncmp(mmu_type, "riscv,sbare", strlen("riscv,sbare")) == 0) { + //has been set in the beginning + } else { + std::cerr << "core (" + << cpu_idx + << ") has an invalid 'mmu-type': " + << mmu_type << ").\n"; + exit(1); + } + } else { + procs[cpu_idx]->set_mmu_capability(IMPL_MMU_SBARE); + } + + cpu_idx++; + } + + if (cpu_idx != nprocs()) { + std::cerr << "core number in dts (" + << cpu_idx + << ") doesn't match it in command line (" + << nprocs() << ").\n"; + exit(1); + } +} + +sim_t::~sim_t() +{ + for (size_t i = 0; i < procs.size(); i++) + delete procs[i]; + delete debug_mmu; +} + +void sim_thread_main(void* arg) +{ + ((sim_t*)arg)->main(); +} + +void sim_t::main() +{ + if (!debug && log) + set_procs_debug(true); + + while (!done()) + { + if (debug || ctrlc_pressed) + interactive(); + else + step(INTERLEAVE); + if (remote_bitbang) { + remote_bitbang->tick(); + } + } +} + +int sim_t::run() +{ + host = context_t::current(); + target.init(sim_thread_main, this); + return htif_t::run(); +} + +void sim_t::step(size_t n) +{ + for (size_t i = 0, steps = 0; i < n; i += steps) + { + steps = std::min(n - i, INTERLEAVE - current_step); + procs[current_proc]->step(steps); + + current_step += steps; + if (current_step == INTERLEAVE) + { + current_step = 0; + procs[current_proc]->get_mmu()->yield_load_reservation(); + if (++current_proc == procs.size()) { + current_proc = 0; + if (clint) clint->increment(INTERLEAVE / INSNS_PER_RTC_TICK); + } + + host->switch_to(); + } + } +} + +void sim_t::set_debug(bool value) +{ + debug = value; +} + +void sim_t::set_histogram(bool value) +{ + histogram_enabled = value; + for (size_t i = 0; i < procs.size(); i++) { + procs[i]->set_histogram(histogram_enabled); + } +} + +void sim_t::configure_log(bool enable_log, bool enable_commitlog) +{ + log = enable_log; + + if 
(!enable_commitlog) + return; + +#ifndef RISCV_ENABLE_COMMITLOG + fputs("Commit logging support has not been properly enabled; " + "please re-build the riscv-isa-sim project using " + "\"configure --enable-commitlog\".\n", + stderr); + abort(); +#else + for (processor_t *proc : procs) { + proc->enable_log_commits(); + } +#endif +} + +void sim_t::set_procs_debug(bool value) +{ + for (size_t i=0; i< procs.size(); i++) + procs[i]->set_debug(value); +} + +static bool paddr_ok(reg_t addr) +{ + return (addr >> MAX_PADDR_BITS) == 0; +} + +bool sim_t::mmio_load(reg_t addr, size_t len, uint8_t* bytes) +{ + if (addr + len < addr || !paddr_ok(addr + len - 1)) + return false; + return bus.load(addr, len, bytes); +} + +bool sim_t::mmio_store(reg_t addr, size_t len, const uint8_t* bytes) +{ + if (addr + len < addr || !paddr_ok(addr + len - 1)) + return false; + return bus.store(addr, len, bytes); +} + +void sim_t::make_dtb() +{ + if (!dtb_file.empty()) { + std::ifstream fin(dtb_file.c_str(), std::ios::binary); + if (!fin.good()) { + std::cerr << "can't find dtb file: " << dtb_file << std::endl; + exit(-1); + } + + std::stringstream strstream; + strstream << fin.rdbuf(); + + dtb = strstream.str(); + } else { + std::pair initrd_bounds = cfg->initrd_bounds(); + dts = make_dts(INSNS_PER_RTC_TICK, CPU_HZ, + initrd_bounds.first, initrd_bounds.second, + cfg->bootargs(), procs, mems); + dtb = dts_compile(dts); + } + + int fdt_code = fdt_check_header(dtb.c_str()); + if (fdt_code) { + std::cerr << "Failed to read DTB from "; + if (dtb_file.empty()) { + std::cerr << "auto-generated DTS string"; + } else { + std::cerr << "`" << dtb_file << "'"; + } + std::cerr << ": " << fdt_strerror(fdt_code) << ".\n"; + exit(-1); + } +} + +void sim_t::set_rom() +{ + const int reset_vec_size = 8; + + reg_t start_pc = cfg->start_pc.value_or(get_entry_point()); + + uint32_t reset_vec[reset_vec_size] = { + 0x297, // auipc t0,0x0 + 0x28593 + (reset_vec_size * 4 << 20), // addi a1, t0, &dtb + 0xf1402573, // csrr a0, mhartid + get_core(0)->get_xlen() == 32 ? 
+ 0x0182a283u : // lw t0,24(t0) + 0x0182b283u, // ld t0,24(t0) + 0x28067, // jr t0 + 0, + (uint32_t) (start_pc & 0xffffffff), + (uint32_t) (start_pc >> 32) + }; + if (get_target_endianness() == memif_endianness_big) { + int i; + // Instuctions are little endian + for (i = 0; reset_vec[i] != 0; i++) + reset_vec[i] = to_le(reset_vec[i]); + // Data is big endian + for (; i < reset_vec_size; i++) + reset_vec[i] = to_be(reset_vec[i]); + + // Correct the high/low order of 64-bit start PC + if (get_core(0)->get_xlen() != 32) + std::swap(reset_vec[reset_vec_size-2], reset_vec[reset_vec_size-1]); + } else { + for (int i = 0; i < reset_vec_size; i++) + reset_vec[i] = to_le(reset_vec[i]); + } + + std::vector rom((char*)reset_vec, (char*)reset_vec + sizeof(reset_vec)); + + rom.insert(rom.end(), dtb.begin(), dtb.end()); + const int align = 0x1000; + rom.resize((rom.size() + align - 1) / align * align); + + boot_rom.reset(new rom_device_t(rom)); + bus.add_device(DEFAULT_RSTVEC, boot_rom.get()); +} + +char* sim_t::addr_to_mem(reg_t addr) { + if (!paddr_ok(addr)) + return NULL; + auto desc = bus.find_device(addr); + if (auto mem = dynamic_cast(desc.second)) + if (addr - desc.first < mem->size()) + return mem->contents(addr - desc.first); + return NULL; +} + +const char* sim_t::get_symbol(uint64_t addr) +{ + return htif_t::get_symbol(addr); +} + +// htif + +void sim_t::reset() +{ + if (dtb_enabled) + set_rom(); +} + +void sim_t::idle() +{ + target.switch_to(); +} + +void sim_t::read_chunk(addr_t taddr, size_t len, void* dst) +{ + assert(len == 8); + auto data = debug_mmu->to_target(debug_mmu->load_uint64(taddr)); + memcpy(dst, &data, sizeof data); +} + +void sim_t::write_chunk(addr_t taddr, size_t len, const void* src) +{ + assert(len == 8); + target_endian data; + memcpy(&data, src, sizeof data); + debug_mmu->store_uint64(taddr, debug_mmu->from_target(data)); +} + +void sim_t::set_target_endianness(memif_endianness_t endianness) +{ +#ifdef RISCV_ENABLE_DUAL_ENDIAN + assert(endianness == memif_endianness_little || endianness == memif_endianness_big); + + bool enable = endianness == memif_endianness_big; + debug_mmu->set_target_big_endian(enable); + for (size_t i = 0; i < procs.size(); i++) { + procs[i]->get_mmu()->set_target_big_endian(enable); + procs[i]->reset(); + } +#else + assert(endianness == memif_endianness_little); +#endif +} + +memif_endianness_t sim_t::get_target_endianness() const +{ +#ifdef RISCV_ENABLE_DUAL_ENDIAN + return debug_mmu->is_target_big_endian()? memif_endianness_big : memif_endianness_little; +#else + return memif_endianness_little; +#endif +} + +void sim_t::proc_reset(unsigned id) +{ + debug_module.proc_reset(id); +} diff --git a/vendor/riscv-isa-sim/riscv/sim.h b/vendor/riscv-isa-sim/riscv/sim.h new file mode 100644 index 00000000..97cada13 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/sim.h @@ -0,0 +1,175 @@ +// See LICENSE for license details. + +#ifndef _RISCV_SIM_H +#define _RISCV_SIM_H + +#include "config.h" + +#ifdef HAVE_BOOST_ASIO +#include +#include +#include +#endif + +#include "cfg.h" +#include "debug_module.h" +#include "devices.h" +#include "log_file.h" +#include "processor.h" +#include "simif.h" + +#include +#include +#include +#include +#include +#include + +class mmu_t; +class remote_bitbang_t; + +// this class encapsulates the processors and memory in a RISC-V machine. 
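// A rough usage sketch, assuming a build without HAVE_BOOST_ASIO; the argument
// values are illustrative only (spike's own main() is the authoritative
// construction site):
//
//   sim_t sim(&cfg, /*halted=*/false, mems, plugin_devices, htif_args,
//             dm_config, /*log_path=*/nullptr,
//             /*dtb_enabled=*/true, /*dtb_file=*/nullptr,
//             /*cmd_file=*/nullptr);
//   int exit_code = sim.run();  // runs the target to completion over HTIF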
+class sim_t : public htif_t, public simif_t +{ +public: + sim_t(const cfg_t *cfg, bool halted, + std::vector> mems, + std::vector> plugin_devices, + const std::vector& args, + const debug_module_config_t &dm_config, const char *log_path, + bool dtb_enabled, const char *dtb_file, +#ifdef HAVE_BOOST_ASIO + boost::asio::io_service *io_service_ptr_ctor, boost::asio::ip::tcp::acceptor *acceptor_ptr_ctor, // option -s +#endif + FILE *cmd_file); // needed for command line option --cmd + ~sim_t(); + + // run the simulation to completion + int run(); + void set_debug(bool value); + void set_histogram(bool value); + + // Configure logging + // + // If enable_log is true, an instruction trace will be generated. If + // enable_commitlog is true, so will the commit results (if this + // build was configured without support for commit logging, the + // function will print an error message and abort). + void configure_log(bool enable_log, bool enable_commitlog); + + void set_procs_debug(bool value); + void set_remote_bitbang(remote_bitbang_t* remote_bitbang) { + this->remote_bitbang = remote_bitbang; + } + const char* get_dts() { if (dts.empty()) reset(); return dts.c_str(); } + processor_t* get_core(size_t i) { return procs.at(i); } + unsigned nprocs() const { return procs.size(); } + + // Callback for processors to let the simulation know they were reset. + void proc_reset(unsigned id); + +private: + isa_parser_t isa; + const cfg_t * const cfg; + std::vector> mems; + std::vector> plugin_devices; + mmu_t* debug_mmu; // debug port into main memory + std::vector procs; + std::pair initrd_range; + std::string dts; + std::string dtb; + std::string dtb_file; + bool dtb_enabled; + std::unique_ptr boot_rom; + std::unique_ptr clint; + bus_t bus; + log_file_t log_file; + + FILE *cmd_file; // pointer to debug command input file + +#ifdef HAVE_BOOST_ASIO + // the following are needed for command socket interface + boost::asio::io_service *io_service_ptr; + boost::asio::ip::tcp::acceptor *acceptor_ptr; + std::unique_ptr socket_ptr; + std::string rin(boost::asio::streambuf *bout_ptr); // read input command string + void wout(boost::asio::streambuf *bout_ptr); // write output to socket +#endif + std::ostream sout_; // used for socket and terminal interface + + processor_t* get_core(const std::string& i); + void step(size_t n); // step through simulation + static const size_t INTERLEAVE = 5000; + static const size_t INSNS_PER_RTC_TICK = 100; // 10 MHz clock for 1 BIPS core + static const size_t CPU_HZ = 1000000000; // 1GHz CPU + size_t current_step; + size_t current_proc; + bool debug; + bool histogram_enabled; // provide a histogram of PCs + bool log; + remote_bitbang_t* remote_bitbang; + + // memory-mapped I/O routines + char* addr_to_mem(reg_t addr); + bool mmio_load(reg_t addr, size_t len, uint8_t* bytes); + bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes); + void make_dtb(); + void set_rom(); + + const char* get_symbol(uint64_t addr); + + // presents a prompt for introspection into the simulation + void interactive(); + + // functions that help implement interactive() + void interactive_help(const std::string& cmd, const std::vector& args); + void interactive_quit(const std::string& cmd, const std::vector& args); + void interactive_run(const std::string& cmd, const std::vector& args, bool noisy); + void interactive_run_noisy(const std::string& cmd, const std::vector& args); + void interactive_run_silent(const std::string& cmd, const std::vector& args); + void interactive_vreg(const std::string& cmd, 
const std::vector& args); + void interactive_reg(const std::string& cmd, const std::vector& args); + void interactive_freg(const std::string& cmd, const std::vector& args); + void interactive_fregh(const std::string& cmd, const std::vector& args); + void interactive_fregs(const std::string& cmd, const std::vector& args); + void interactive_fregd(const std::string& cmd, const std::vector& args); + void interactive_pc(const std::string& cmd, const std::vector& args); + void interactive_mem(const std::string& cmd, const std::vector& args); + void interactive_str(const std::string& cmd, const std::vector& args); + void interactive_until(const std::string& cmd, const std::vector& args, bool noisy); + void interactive_until_silent(const std::string& cmd, const std::vector& args); + void interactive_until_noisy(const std::string& cmd, const std::vector& args); + reg_t get_reg(const std::vector& args); + freg_t get_freg(const std::vector& args); + reg_t get_mem(const std::vector& args); + reg_t get_pc(const std::vector& args); + + friend class processor_t; + friend class mmu_t; + friend class debug_module_t; + + // htif + friend void sim_thread_main(void*); + void main(); + + context_t* host; + context_t target; + void reset(); + void idle(); + void read_chunk(addr_t taddr, size_t len, void* dst); + void write_chunk(addr_t taddr, size_t len, const void* src); + size_t chunk_align() { return 8; } + size_t chunk_max_size() { return 8; } + void set_target_endianness(memif_endianness_t endianness); + memif_endianness_t get_target_endianness() const; + +public: + // Initialize this after procs, because in debug_module_t::reset() we + // enumerate processors, which segfaults if procs hasn't been initialized + // yet. + debug_module_t debug_module; +}; + +extern volatile bool ctrlc_pressed; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/simif.h b/vendor/riscv-isa-sim/riscv/simif.h new file mode 100644 index 00000000..0e75d45b --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/simif.h @@ -0,0 +1,24 @@ +// See LICENSE for license details. + +#ifndef _RISCV_SIMIF_H +#define _RISCV_SIMIF_H + +#include "decode.h" + +// this is the interface to the simulator used by the processors and memory +class simif_t +{ +public: + // should return NULL for MMIO addresses + virtual char* addr_to_mem(reg_t addr) = 0; + // used for MMIO addresses + virtual bool mmio_load(reg_t addr, size_t len, uint8_t* bytes) = 0; + virtual bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes) = 0; + // Callback for processors to let the simulation know they were reset. + virtual void proc_reset(unsigned id) = 0; + + virtual const char* get_symbol(uint64_t addr) = 0; + +}; + +#endif diff --git a/vendor/riscv-isa-sim/riscv/tracer.h b/vendor/riscv-isa-sim/riscv/tracer.h new file mode 100644 index 00000000..9f1bc784 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/tracer.h @@ -0,0 +1,11 @@ +// See LICENSE for license details. + +#ifndef _RISCV_TRACER_H +#define _RISCV_TRACER_H + +#include "processor.h" + +static inline void trace_opcode(processor_t* p, insn_bits_t opc, insn_t insn) { +} + +#endif diff --git a/vendor/riscv-isa-sim/riscv/trap.h b/vendor/riscv-isa-sim/riscv/trap.h new file mode 100644 index 00000000..1cd62e15 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/trap.h @@ -0,0 +1,116 @@ +// See LICENSE for license details. 
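To make the contract of the simif_t interface above concrete, here is a minimal, hypothetical implementation backed by a single flat RAM buffer. Everything about it (the class name, the decision to ignore MMIO traffic) is illustrative; sim_t earlier in this diff is the real implementation.

#include "simif.h"
#include <cstring>
#include <vector>

// Hypothetical simif_t: one RAM region, all other accesses treated as dummy MMIO.
class flat_sim_t : public simif_t
{
 public:
  flat_sim_t(reg_t base, size_t size) : base(base), ram(size) {}

  // Return a host pointer for RAM, NULL otherwise so callers fall back to MMIO.
  char* addr_to_mem(reg_t addr) override {
    if (addr >= base && addr - base < ram.size())
      return &ram[addr - base];
    return NULL;
  }
  bool mmio_load(reg_t addr, size_t len, uint8_t* bytes) override {
    memset(bytes, 0, len);  // no devices in this sketch: loads read as zero
    return true;
  }
  bool mmio_store(reg_t addr, size_t len, const uint8_t* bytes) override {
    return true;            // stores are silently ignored
  }
  void proc_reset(unsigned id) override {}
  const char* get_symbol(uint64_t addr) override { return NULL; }

 private:
  reg_t base;
  std::vector<char> ram;
};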
+ +#ifndef _RISCV_TRAP_H +#define _RISCV_TRAP_H + +#include "decode.h" +#include + +struct state_t; + +class trap_t +{ + public: + trap_t(reg_t which) : which(which) {} + virtual bool has_gva() { return false; } + virtual bool has_tval() { return false; } + virtual reg_t get_tval() { return 0; } + virtual bool has_tval2() { return false; } + virtual reg_t get_tval2() { return 0; } + virtual bool has_tinst() { return false; } + virtual reg_t get_tinst() { return 0; } + reg_t cause() { return which; } + + virtual const char* name() + { + const char* fmt = uint8_t(which) == which ? "trap #%u" : "interrupt #%u"; + sprintf(_name, fmt, uint8_t(which)); + return _name; + } + + private: + char _name[16]; + reg_t which; +}; + +class insn_trap_t : public trap_t +{ + public: + insn_trap_t(reg_t which, bool gva, reg_t tval) + : trap_t(which), gva(gva), tval(tval) {} + bool has_gva() override { return gva; } + bool has_tval() override { return true; } + reg_t get_tval() override { return tval; } + private: + bool gva; + reg_t tval; +}; + +class mem_trap_t : public trap_t +{ + public: + mem_trap_t(reg_t which, bool gva, reg_t tval, reg_t tval2, reg_t tinst) + : trap_t(which), gva(gva), tval(tval), tval2(tval2), tinst(tinst) {} + bool has_gva() override { return gva; } + bool has_tval() override { return true; } + reg_t get_tval() override { return tval; } + bool has_tval2() override { return true; } + reg_t get_tval2() override { return tval2; } + bool has_tinst() override { return true; } + reg_t get_tinst() override { return tinst; } + private: + bool gva; + reg_t tval, tval2, tinst; +}; + +#define DECLARE_TRAP(n, x) class trap_##x : public trap_t { \ + public: \ + trap_##x() : trap_t(n) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_INST_TRAP(n, x) class trap_##x : public insn_trap_t { \ + public: \ + trap_##x(reg_t tval) : insn_trap_t(n, /*gva*/false, tval) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_INST_WITH_GVA_TRAP(n, x) class trap_##x : public insn_trap_t { \ + public: \ + trap_##x(bool gva, reg_t tval) : insn_trap_t(n, gva, tval) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_MEM_TRAP(n, x) class trap_##x : public mem_trap_t { \ + public: \ + trap_##x(bool gva, reg_t tval, reg_t tval2, reg_t tinst) : mem_trap_t(n, gva, tval, tval2, tinst) {} \ + const char* name() { return "trap_"#x; } \ +}; + +#define DECLARE_MEM_GVA_TRAP(n, x) class trap_##x : public mem_trap_t { \ + public: \ + trap_##x(reg_t tval, reg_t tval2, reg_t tinst) : mem_trap_t(n, true, tval, tval2, tinst) {} \ + const char* name() { return "trap_"#x; } \ +}; + +DECLARE_MEM_TRAP(CAUSE_MISALIGNED_FETCH, instruction_address_misaligned) +DECLARE_MEM_TRAP(CAUSE_FETCH_ACCESS, instruction_access_fault) +DECLARE_INST_TRAP(CAUSE_ILLEGAL_INSTRUCTION, illegal_instruction) +DECLARE_INST_WITH_GVA_TRAP(CAUSE_BREAKPOINT, breakpoint) +DECLARE_MEM_TRAP(CAUSE_MISALIGNED_LOAD, load_address_misaligned) +DECLARE_MEM_TRAP(CAUSE_MISALIGNED_STORE, store_address_misaligned) +DECLARE_MEM_TRAP(CAUSE_LOAD_ACCESS, load_access_fault) +DECLARE_MEM_TRAP(CAUSE_STORE_ACCESS, store_access_fault) +DECLARE_TRAP(CAUSE_USER_ECALL, user_ecall) +DECLARE_TRAP(CAUSE_SUPERVISOR_ECALL, supervisor_ecall) +DECLARE_TRAP(CAUSE_VIRTUAL_SUPERVISOR_ECALL, virtual_supervisor_ecall) +DECLARE_TRAP(CAUSE_MACHINE_ECALL, machine_ecall) +DECLARE_MEM_TRAP(CAUSE_FETCH_PAGE_FAULT, instruction_page_fault) +DECLARE_MEM_TRAP(CAUSE_LOAD_PAGE_FAULT, load_page_fault) +DECLARE_MEM_TRAP(CAUSE_STORE_PAGE_FAULT, 
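  // (Forcing timing back to 0 for execute triggers, as done just below, matches
  // the debug spec's recommendation that triggers on instruction execution fire
  // before the instruction rather than after it.)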
store_page_fault) +DECLARE_MEM_GVA_TRAP(CAUSE_FETCH_GUEST_PAGE_FAULT, instruction_guest_page_fault) +DECLARE_MEM_GVA_TRAP(CAUSE_LOAD_GUEST_PAGE_FAULT, load_guest_page_fault) +DECLARE_INST_TRAP(CAUSE_VIRTUAL_INSTRUCTION, virtual_instruction) +DECLARE_MEM_GVA_TRAP(CAUSE_STORE_GUEST_PAGE_FAULT, store_guest_page_fault) + +#endif diff --git a/vendor/riscv-isa-sim/riscv/triggers.cc b/vendor/riscv-isa-sim/riscv/triggers.cc new file mode 100644 index 00000000..69888bf5 --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/triggers.cc @@ -0,0 +1,206 @@ +#include "processor.h" +#include "triggers.h" + +namespace triggers { + +mcontrol_t::mcontrol_t() : + type(2), maskmax(0), select(false), timing(false), chain_bit(false), + match(MATCH_EQUAL), m(false), h(false), s(false), u(false), + execute_bit(false), store_bit(false), load_bit(false) +{ +} + +reg_t mcontrol_t::tdata1_read(const processor_t * const proc) const noexcept { + reg_t v = 0; + auto xlen = proc->get_xlen(); + v = set_field(v, MCONTROL_TYPE(xlen), type); + v = set_field(v, MCONTROL_DMODE(xlen), dmode); + v = set_field(v, MCONTROL_MASKMAX(xlen), maskmax); + v = set_field(v, MCONTROL_SELECT, select); + v = set_field(v, MCONTROL_TIMING, timing); + v = set_field(v, MCONTROL_ACTION, action); + v = set_field(v, MCONTROL_CHAIN, chain_bit); + v = set_field(v, MCONTROL_MATCH, match); + v = set_field(v, MCONTROL_M, m); + v = set_field(v, MCONTROL_H, h); + v = set_field(v, MCONTROL_S, s); + v = set_field(v, MCONTROL_U, u); + v = set_field(v, MCONTROL_EXECUTE, execute_bit); + v = set_field(v, MCONTROL_STORE, store_bit); + v = set_field(v, MCONTROL_LOAD, load_bit); + return v; +} + +bool mcontrol_t::tdata1_write(processor_t * const proc, const reg_t val) noexcept { + if (dmode && !proc->get_state()->debug_mode) { + return false; + } + auto xlen = proc->get_xlen(); + dmode = get_field(val, MCONTROL_DMODE(xlen)); + select = get_field(val, MCONTROL_SELECT); + timing = get_field(val, MCONTROL_TIMING); + action = (triggers::action_t) get_field(val, MCONTROL_ACTION); + chain_bit = get_field(val, MCONTROL_CHAIN); + unsigned match_value = get_field(val, MCONTROL_MATCH); + switch (match_value) { + case MATCH_EQUAL: + case MATCH_NAPOT: + case MATCH_GE: + case MATCH_LT: + case MATCH_MASK_LOW: + case MATCH_MASK_HIGH: + match = (triggers::mcontrol_t::match_t) match_value; + break; + default: + match = MATCH_EQUAL; + break; + } + m = get_field(val, MCONTROL_M); + h = get_field(val, MCONTROL_H); + s = get_field(val, MCONTROL_S); + u = get_field(val, MCONTROL_U); + execute_bit = get_field(val, MCONTROL_EXECUTE); + store_bit = get_field(val, MCONTROL_STORE); + load_bit = get_field(val, MCONTROL_LOAD); + // Assume we're here because of csrw. 
+ if (execute_bit) + timing = 0; + return true; +} + +reg_t mcontrol_t::tdata2_read(const processor_t * const proc) const noexcept { + return tdata2; +} + +bool mcontrol_t::tdata2_write(processor_t * const proc, const reg_t val) noexcept { + if (dmode && !proc->get_state()->debug_mode) { + return false; + } + tdata2 = val; + return true; +} + +bool mcontrol_t::simple_match(unsigned xlen, reg_t value) const { + switch (match) { + case triggers::mcontrol_t::MATCH_EQUAL: + return value == tdata2; + case triggers::mcontrol_t::MATCH_NAPOT: + { + reg_t mask = ~((1 << (cto(tdata2)+1)) - 1); + return (value & mask) == (tdata2 & mask); + } + case triggers::mcontrol_t::MATCH_GE: + return value >= tdata2; + case triggers::mcontrol_t::MATCH_LT: + return value < tdata2; + case triggers::mcontrol_t::MATCH_MASK_LOW: + { + reg_t mask = tdata2 >> (xlen/2); + return (value & mask) == (tdata2 & mask); + } + case triggers::mcontrol_t::MATCH_MASK_HIGH: + { + reg_t mask = tdata2 >> (xlen/2); + return ((value >> (xlen/2)) & mask) == (tdata2 & mask); + } + } + assert(0); +} + +match_result_t mcontrol_t::memory_access_match(processor_t * const proc, operation_t operation, reg_t address, reg_t data) { + state_t * const state = proc->get_state(); + if ((operation == triggers::OPERATION_EXECUTE && !execute_bit) || + (operation == triggers::OPERATION_STORE && !store_bit) || + (operation == triggers::OPERATION_LOAD && !load_bit) || + (state->prv == PRV_M && !m) || + (state->prv == PRV_S && !s) || + (state->prv == PRV_U && !u)) { + return MATCH_NONE; + } + + reg_t value; + if (select) { + value = data; + } else { + value = address; + } + + // We need this because in 32-bit mode sometimes the PC bits get sign + // extended. + auto xlen = proc->get_xlen(); + if (xlen == 32) { + value &= 0xffffffff; + } + + if (simple_match(xlen, value)) { + if (timing) + return MATCH_FIRE_AFTER; + else + return MATCH_FIRE_BEFORE; + } + return MATCH_NONE; +} + +module_t::module_t(unsigned count) : triggers(count) { + for (unsigned i = 0; i < count; i++) { + triggers[i] = new mcontrol_t(); + } +} + +module_t::~module_t() { + for (auto trigger : triggers) { + delete trigger; + } +} + +match_result_t module_t::memory_access_match(action_t * const action, operation_t operation, reg_t address, reg_t data) +{ + state_t * const state = proc->get_state(); + if (state->debug_mode) + return MATCH_NONE; + + bool chain_ok = true; + + for (unsigned int i = 0; i < triggers.size(); i++) { + if (!chain_ok) { + chain_ok |= !triggers[i]->chain(); + continue; + } + + match_result_t result = triggers[i]->memory_access_match(proc, operation, address, data); + if (result != MATCH_NONE && !triggers[i]->chain()) { + *action = triggers[i]->action; + return result; + } + + chain_ok = true; + } + return MATCH_NONE; +} + +reg_t module_t::tdata1_read(const processor_t * const proc, unsigned index) const noexcept +{ + return triggers[index]->tdata1_read(proc); +} + +bool module_t::tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept +{ + bool result = triggers[index]->tdata1_write(proc, val); + proc->trigger_updated(triggers); + return result; +} + +reg_t module_t::tdata2_read(const processor_t * const proc, unsigned index) const noexcept +{ + return triggers[index]->tdata2_read(proc); +} + +bool module_t::tdata2_write(processor_t * const proc, unsigned index, const reg_t val) noexcept +{ + bool result = triggers[index]->tdata2_write(proc, val); + proc->trigger_updated(triggers); + return result; +} + + +}; diff --git 
a/vendor/riscv-isa-sim/riscv/triggers.h b/vendor/riscv-isa-sim/riscv/triggers.h new file mode 100644 index 00000000..ad294c8a --- /dev/null +++ b/vendor/riscv-isa-sim/riscv/triggers.h @@ -0,0 +1,138 @@ +#ifndef _RISCV_TRIGGERS_H +#define _RISCV_TRIGGERS_H + +#include + +#include "decode.h" + +namespace triggers { + +typedef enum { + OPERATION_EXECUTE, + OPERATION_STORE, + OPERATION_LOAD, +} operation_t; + +typedef enum +{ + ACTION_DEBUG_EXCEPTION = MCONTROL_ACTION_DEBUG_EXCEPTION, + ACTION_DEBUG_MODE = MCONTROL_ACTION_DEBUG_MODE, + ACTION_TRACE_START = MCONTROL_ACTION_TRACE_START, + ACTION_TRACE_STOP = MCONTROL_ACTION_TRACE_STOP, + ACTION_TRACE_EMIT = MCONTROL_ACTION_TRACE_EMIT +} action_t; + +typedef enum { + MATCH_NONE, + MATCH_FIRE_BEFORE, + MATCH_FIRE_AFTER +} match_result_t; + +class matched_t +{ + public: + matched_t(triggers::operation_t operation, reg_t address, reg_t data, action_t action) : + operation(operation), address(address), data(data), action(action) {} + + triggers::operation_t operation; + reg_t address; + reg_t data; + action_t action; +}; + +class trigger_t { +public: + virtual match_result_t memory_access_match(processor_t * const proc, + operation_t operation, reg_t address, reg_t data) = 0; + + virtual reg_t tdata1_read(const processor_t * const proc) const noexcept = 0; + virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept = 0; + virtual reg_t tdata2_read(const processor_t * const proc) const noexcept = 0; + virtual bool tdata2_write(processor_t * const proc, const reg_t val) noexcept = 0; + + virtual bool chain() const { return false; } + virtual bool execute() const { return false; } + virtual bool store() const { return false; } + virtual bool load() const { return false; } + +public: + bool dmode; + action_t action; + + virtual ~trigger_t() {}; + +protected: + trigger_t() : dmode(false), action(ACTION_DEBUG_EXCEPTION) {}; +}; + +class mcontrol_t : public trigger_t { +public: + typedef enum + { + MATCH_EQUAL = MCONTROL_MATCH_EQUAL, + MATCH_NAPOT = MCONTROL_MATCH_NAPOT, + MATCH_GE = MCONTROL_MATCH_GE, + MATCH_LT = MCONTROL_MATCH_LT, + MATCH_MASK_LOW = MCONTROL_MATCH_MASK_LOW, + MATCH_MASK_HIGH = MCONTROL_MATCH_MASK_HIGH + } match_t; + + mcontrol_t(); + + virtual reg_t tdata1_read(const processor_t * const proc) const noexcept override; + virtual bool tdata1_write(processor_t * const proc, const reg_t val) noexcept override; + virtual reg_t tdata2_read(const processor_t * const proc) const noexcept override; + virtual bool tdata2_write(processor_t * const proc, const reg_t val) noexcept override; + + virtual bool chain() const override { return chain_bit; } + virtual bool execute() const override { return execute_bit; } + virtual bool store() const override { return store_bit; } + virtual bool load() const override { return load_bit; } + + virtual match_result_t memory_access_match(processor_t * const proc, + operation_t operation, reg_t address, reg_t data) override; + +private: + bool simple_match(unsigned xlen, reg_t value) const; + +public: + uint8_t type; + uint8_t maskmax; + bool select; + bool timing; + bool chain_bit; + match_t match; + bool m; + bool h; + bool s; + bool u; + bool execute_bit; + bool store_bit; + bool load_bit; + reg_t tdata2; + +}; + +class module_t { +public: + module_t(unsigned count); + ~module_t(); + + unsigned count() const { return triggers.size(); } + + match_result_t memory_access_match(action_t * const action, + operation_t operation, reg_t address, reg_t data); + + reg_t tdata1_read(const processor_t * 
const proc, unsigned index) const noexcept; + bool tdata1_write(processor_t * const proc, unsigned index, const reg_t val) noexcept; + reg_t tdata2_read(const processor_t * const proc, unsigned index) const noexcept; + bool tdata2_write(processor_t * const proc, unsigned index, const reg_t val) noexcept; + + processor_t *proc; +private: + std::vector triggers; +}; + +}; + +#endif diff --git a/vendor/riscv-isa-sim/scripts/config.guess b/vendor/riscv-isa-sim/scripts/config.guess new file mode 100644 index 00000000..699b3a10 --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/config.guess @@ -0,0 +1,1698 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2020 Free Software Foundation, Inc. + +timestamp='2020-11-19' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess +# +# Please send patches to . + + +me=$(echo "$0" | sed -e 's,.*/,,') + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2020 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. 
+ +# Portable tmp directory creation inspired by the Autoconf team. + +tmp= +# shellcheck disable=SC2172 +trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 + +set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 + : "${TMPDIR=/tmp}" + # shellcheck disable=SC2039 + { tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } + dummy=$tmp/dummy + case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in + ,,) echo "int x;" > "$dummy.c" + for driver in cc gcc c89 c99 ; do + if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then + CC_FOR_BUILD="$driver" + break + fi + done + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; + esac +} + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if test -f /.attbin/uname ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown +UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown +UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown +UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown + +case "$UNAME_SYSTEM" in +Linux|GNU|GNU/*) + LIBC=unknown + + set_cc_for_build + cat <<-EOF > "$dummy.c" + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #elif defined(__GLIBC__) + LIBC=gnu + #else + #include + /* First heuristic to detect musl libc. */ + #ifdef __DEFINED_va_list + LIBC=musl + #endif + #endif + EOF + eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')" + + # Second heuristic to detect musl libc. + if [ "$LIBC" = unknown ] && + command -v ldd >/dev/null && + ldd --version 2>&1 | grep -q ^musl; then + LIBC=musl + fi + + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + if [ "$LIBC" = unknown ]; then + LIBC=gnu + fi + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". 
+ sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \ + "/sbin/$sysctl" 2>/dev/null || \ + "/usr/sbin/$sysctl" 2>/dev/null || \ + echo unknown)) + case "$UNAME_MACHINE_ARCH" in + aarch64eb) machine=aarch64_be-unknown ;; + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,') + endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p') + machine="${arch}${endian}"-unknown + ;; + *) machine="$UNAME_MACHINE_ARCH"-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. + case "$UNAME_MACHINE_ARCH" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "$UNAME_MACHINE_ARCH" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr") + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "$UNAME_VERSION" in + Debian*) + release='-gnu' + ;; + *) + release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2) + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "$machine-${os}${release}${abi-}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//') + echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//') + echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//') + echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" + exit ;; + *:MidnightBSD:*:*) + echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" + exit ;; + *:ekkoBSD:*:*) + echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" + exit ;; + *:SolidBSD:*:*) + echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" + exit ;; + *:OS108:*:*) + echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE" + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:MirBSD:*:*) + echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" + exit ;; + *:Sortix:*:*) + echo "$UNAME_MACHINE"-unknown-sortix + exit ;; + *:Twizzler:*:*) + echo "$UNAME_MACHINE"-unknown-twizzler + exit ;; + *:Redox:*:*) + echo "$UNAME_MACHINE"-unknown-redox + exit ;; + mips:OSF1:*.*) + echo mips-dec-osf1 + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}') + ;; + *5.*) + UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}') + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1) + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)" + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo "$UNAME_MACHINE"-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix"$UNAME_RELEASE" + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "$( (/bin/universe) 2>/dev/null)" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case $(/usr/bin/uname -p) in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux"$UNAME_RELEASE" + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + sun4*:SunOS:*:*) + case "$(/usr/bin/arch -k)" in + Series*|S4*) + UNAME_RELEASE=$(uname -v) + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')" + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos"$UNAME_RELEASE" + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null) + test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 + case "$(/bin/arch)" in + sun3) + echo m68k-sun-sunos"$UNAME_RELEASE" + ;; + sun4) + echo sparc-sun-sunos"$UNAME_RELEASE" + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos"$UNAME_RELEASE" + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint"$UNAME_RELEASE" + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint"$UNAME_RELEASE" + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint"$UNAME_RELEASE" + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint"$UNAME_RELEASE" + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten"$UNAME_RELEASE" + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten"$UNAME_RELEASE" + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix"$UNAME_RELEASE" + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix"$UNAME_RELEASE" + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix"$UNAME_RELEASE" + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && + dummyarg=$(echo "$UNAME_RELEASE" 
| sed -n 's/\([0-9]*\).*/\1/p') && + SYSTEM_NAME=$("$dummy" "$dummyarg") && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos"$UNAME_RELEASE" + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=$(/usr/bin/uname -p) + if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110 + then + if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \ + test "$TARGET_BINARY_INTERFACE"x = x + then + echo m88k-dg-dgux"$UNAME_RELEASE" + else + echo m88k-dg-dguxbcs"$UNAME_RELEASE" + fi + else + echo i586-dg-dgux"$UNAME_RELEASE" + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')" + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'$(uname -s)'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if test -x /usr/bin/oslevel ; then + IBM_REV=$(/usr/bin/oslevel) + else + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + fi + echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }') + if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if test -x /usr/bin/lslpp ; then + IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/) + else + IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + fi + echo "$IBM_ARCH"-ibm-aix"$IBM_REV" + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//') + case "$UNAME_MACHINE" in + 9000/31?) 
HP_ARCH=m68000 ;; + 9000/[34]??) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if test -x /usr/bin/getconf; then + sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null) + sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null) + case "$sc_cpu_version" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "$sc_kernel_bits" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if test "$HP_ARCH" = ""; then + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy") + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if test "$HP_ARCH" = hppa2.0w + then + set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo "$HP_ARCH"-hp-hpux"$HPUX_REV" + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//') + echo ia64-hp-hpux"$HPUX_REV" + exit ;; + 3050*:HI-UX:*:*) + set_cc_for_build + sed 's/^ //' << EOF > "$dummy.c" + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if test -x /usr/sbin/sysversion ; then + echo "$UNAME_MACHINE"-unknown-osf1mk + else + echo "$UNAME_MACHINE"-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz) + FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') + FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/') + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') + FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/') + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi"$UNAME_RELEASE" + exit ;; + *:BSD/OS:*:*) + echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" + exit ;; + arm:FreeBSD:*:*) + UNAME_PROCESSOR=$(uname -p) + set_cc_for_build + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi + else + echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 
's/[-(].*//')"-gnueabihf + fi + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=$(/usr/bin/uname -p) + case "$UNAME_PROCESSOR" in + amd64) + UNAME_PROCESSOR=x86_64 ;; + i386) + UNAME_PROCESSOR=i586 ;; + esac + echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" + exit ;; + i*:CYGWIN*:*) + echo "$UNAME_MACHINE"-pc-cygwin + exit ;; + *:MINGW64*:*) + echo "$UNAME_MACHINE"-pc-mingw64 + exit ;; + *:MINGW*:*) + echo "$UNAME_MACHINE"-pc-mingw32 + exit ;; + *:MSYS*:*) + echo "$UNAME_MACHINE"-pc-msys + exit ;; + i*:PW*:*) + echo "$UNAME_MACHINE"-pc-pw32 + exit ;; + *:Interix*:*) + case "$UNAME_MACHINE" in + x86) + echo i586-pc-interix"$UNAME_RELEASE" + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix"$UNAME_RELEASE" + exit ;; + IA64) + echo ia64-unknown-interix"$UNAME_RELEASE" + exit ;; + esac ;; + i*:UWIN*:*) + echo "$UNAME_MACHINE"-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-pc-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" + exit ;; + *:GNU:*:*) + # the GNU system + echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')" + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC" + exit ;; + *:Minix:*:*) + echo "$UNAME_MACHINE"-unknown-minix + exit ;; + aarch64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + alpha:Linux:*:*) + case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + arm*:Linux:*:*) + set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi + else + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + cris:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + crisv32:Linux:*:*) + echo "$UNAME_MACHINE"-axis-linux-"$LIBC" + exit ;; + e2k:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + frv:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + hexagon:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:Linux:*:*) + echo "$UNAME_MACHINE"-pc-linux-"$LIBC" + exit ;; + ia64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + k1om:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m32r*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + m68*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + set_cc_for_build + IS_GLIBC=0 + test x"${LIBC}" = xgnu && IS_GLIBC=1 + sed 's/^ //' << EOF > "$dummy.c" + #undef CPU + #undef mips + #undef mipsel + #undef mips64 + #undef mips64el + #if ${IS_GLIBC} && defined(_ABI64) + LIBCABI=gnuabi64 + #else + #if ${IS_GLIBC} && defined(_ABIN32) + LIBCABI=gnuabin32 + #else + LIBCABI=${LIBC} + #endif + #endif + + #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa64r6 + #else + #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 + CPU=mipsisa32r6 + #else + #if defined(__mips64) + CPU=mips64 + #else + CPU=mips + #endif + #endif + #endif + + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + MIPS_ENDIAN=el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + MIPS_ENDIAN= + #else + MIPS_ENDIAN= + #endif + #endif +EOF + eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')" + test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } + ;; + mips64el:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-"$LIBC" + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-"$LIBC" + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-"$LIBC" + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in + PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; + PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; + *) echo hppa-unknown-linux-"$LIBC" ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-"$LIBC" + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-"$LIBC" + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-"$LIBC" + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-"$LIBC" + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" 
+ exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" + exit ;; + sh64*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + sh*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + tile*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + vax:Linux:*:*) + echo "$UNAME_MACHINE"-dec-linux-"$LIBC" + exit ;; + x86_64:Linux:*:*) + set_cc_for_build + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_X32 >/dev/null + then + LIBCABI="$LIBC"x32 + fi + fi + echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI" + exit ;; + xtensa*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo "$UNAME_MACHINE"-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo "$UNAME_MACHINE"-unknown-stop + exit ;; + i*86:atheos:*:*) + echo "$UNAME_MACHINE"-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo "$UNAME_MACHINE"-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos"$UNAME_RELEASE" + exit ;; + i*86:*DOS:*:*) + echo "$UNAME_MACHINE"-pc-msdosdjgpp + exit ;; + i*86:*:4.*:*) + UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//') + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" + else + echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case $(/bin/uname -X | grep "^Machine") in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}" + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=$(sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //')) + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" + else + echo "$UNAME_MACHINE"-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. 
+ echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. + echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos"$UNAME_RELEASE" + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos"$UNAME_RELEASE" + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos"$UNAME_RELEASE" + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv"$UNAME_RELEASE" + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=$( (uname -p) 2>/dev/null) + echo "$UNAME_MACHINE"-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo "$UNAME_MACHINE"-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux"$UNAME_RELEASE" + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if test -d /usr/nec; then + echo mips-nec-sysv"$UNAME_RELEASE" + else + echo mips-unknown-sysv"$UNAME_RELEASE" + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. 
+ echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. + echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux"$UNAME_RELEASE" + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux"$UNAME_RELEASE" + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux"$UNAME_RELEASE" + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux"$UNAME_RELEASE" + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux"$UNAME_RELEASE" + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux"$UNAME_RELEASE" + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody"$UNAME_RELEASE" + exit ;; + *:Rhapsody:*:*) + echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" + exit ;; + arm64:Darwin:*:*) + echo aarch64-apple-darwin"$UNAME_RELEASE" + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=$(uname -p) + case $UNAME_PROCESSOR in + unknown) UNAME_PROCESSOR=powerpc ;; + esac + if command -v xcode-select > /dev/null 2> /dev/null && \ + ! xcode-select --print-path > /dev/null 2> /dev/null ; then + # Avoid executing cc if there is no toolchain installed as + # cc will be a stub that puts up a graphical alert + # prompting the user to install developer tools. + CC_FOR_BUILD=no_compiler_found + else + set_cc_for_build + fi + if test "$CC_FOR_BUILD" != no_compiler_found; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc + if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_PPC >/dev/null + then + UNAME_PROCESSOR=powerpc + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # uname -m returns i386 or x86_64 + UNAME_PROCESSOR=$UNAME_MACHINE + fi + echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=$(uname -p) + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-*:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSR-*:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSV-*:NONSTOP_KERNEL:*:*) + echo nsv-tandem-nsk"$UNAME_RELEASE" + exit ;; + NSX-*:NONSTOP_KERNEL:*:*) + echo nsx-tandem-nsk"$UNAME_RELEASE" + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ # shellcheck disable=SC2154 + if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo "$UNAME_MACHINE"-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux"$UNAME_RELEASE" + exit ;; + *:DragonFly:*:*) + echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=$( (uname -p) 2>/dev/null) + case "$UNAME_MACHINE" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')" + exit ;; + i*86:rdos:*:*) + echo "$UNAME_MACHINE"-pc-rdos + exit ;; + i*86:AROS:*:*) + echo "$UNAME_MACHINE"-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo "$UNAME_MACHINE"-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; + *:Unleashed:*:*) + echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE" + exit ;; +esac + +# No uname command or uname output not recognized. +set_cc_for_build +cat > "$dummy.c" < +#include +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#include +#if defined(_SIZE_T_) || defined(SIGLOST) +#include +#endif +#endif +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, + I don't know.... */ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null); + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); +#endif + +#if defined (vax) +#if !defined (ultrix) +#include +#if defined (BSD) +#if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +#else +#if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#endif +#else + printf ("vax-dec-bsd\n"); exit (0); +#endif +#else +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname un; + uname (&un); + printf ("vax-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("vax-dec-ultrix\n"); exit (0); +#endif +#endif +#endif +#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) +#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) +#if defined(_SIZE_T_) || defined(SIGLOST) + struct utsname *un; + uname (&un); + printf ("mips-dec-ultrix%s\n", un.release); exit (0); +#else + printf ("mips-dec-ultrix\n"); exit (0); +#endif +#endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. +test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } + +echo "$0: unable to guess system type" >&2 + +case "$UNAME_MACHINE:$UNAME_SYSTEM" in + mips:Linux | mips64:Linux) + # If we got here on MIPS GNU/Linux, output extra information. + cat >&2 <&2 <&2 </dev/null || echo unknown) +uname -r = $( (uname -r) 2>/dev/null || echo unknown) +uname -s = $( (uname -s) 2>/dev/null || echo unknown) +uname -v = $( (uname -v) 2>/dev/null || echo unknown) + +/usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null) +/bin/uname -X = $( (/bin/uname -X) 2>/dev/null) + +hostinfo = $( (hostinfo) 2>/dev/null) +/bin/universe = $( (/bin/universe) 2>/dev/null) +/usr/bin/arch -k = $( (/usr/bin/arch -k) 2>/dev/null) +/bin/arch = $( (/bin/arch) 2>/dev/null) +/usr/bin/oslevel = $( (/usr/bin/oslevel) 2>/dev/null) +/usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null) + +UNAME_MACHINE = "$UNAME_MACHINE" +UNAME_RELEASE = "$UNAME_RELEASE" +UNAME_SYSTEM = "$UNAME_SYSTEM" +UNAME_VERSION = "$UNAME_VERSION" +EOF +fi + +exit 1 + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/riscv-isa-sim/scripts/config.sub b/vendor/riscv-isa-sim/scripts/config.sub new file mode 100644 index 00000000..19c9553b --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/config.sub @@ -0,0 +1,1854 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2020 Free Software Foundation, Inc. + +timestamp='2020-12-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . 
+# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# https://git.savannah.gnu.org/cgit/config.git/plain/config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=$(echo "$0" | sed -e 's,.*/,,') + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Options: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2020 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + + *local*) + # First pass through any local machine types. 
+ echo "$1" + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Split fields of configuration type +# shellcheck disable=SC2162 +IFS="-" read field1 field2 field3 field4 <&2 + exit 1 + ;; + *-*-*-*) + basic_machine=$field1-$field2 + basic_os=$field3-$field4 + ;; + *-*-*) + # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two + # parts + maybe_os=$field2-$field3 + case $maybe_os in + nto-qnx* | linux-* | uclinux-uclibc* \ + | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ + | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ + | storm-chaos* | os2-emx* | rtmk-nova*) + basic_machine=$field1 + basic_os=$maybe_os + ;; + android-linux) + basic_machine=$field1-unknown + basic_os=linux-android + ;; + *) + basic_machine=$field1-$field2 + basic_os=$field3 + ;; + esac + ;; + *-*) + # A lone config we happen to match not fitting any pattern + case $field1-$field2 in + decstation-3100) + basic_machine=mips-dec + basic_os= + ;; + *-*) + # Second component is usually, but not always the OS + case $field2 in + # Prevent following clause from handling this valid os + sun*os*) + basic_machine=$field1 + basic_os=$field2 + ;; + # Manufacturers + dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ + | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ + | unicom* | ibm* | next | hp | isi* | apollo | altos* \ + | convergent* | ncr* | news | 32* | 3600* | 3100* \ + | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ + | ultra | tti* | harris | dolphin | highlevel | gould \ + | cbm | ns | masscomp | apple | axis | knuth | cray \ + | microblaze* | sim | cisco \ + | oki | wec | wrs | winbond) + basic_machine=$field1-$field2 + basic_os= + ;; + *) + basic_machine=$field1 + basic_os=$field2 + ;; + esac + ;; + esac + ;; + *) + # Convert single-component short-hands not valid as part of + # multi-component configurations. 
+ case $field1 in + 386bsd) + basic_machine=i386-pc + basic_os=bsd + ;; + a29khif) + basic_machine=a29k-amd + basic_os=udi + ;; + adobe68k) + basic_machine=m68010-adobe + basic_os=scout + ;; + alliant) + basic_machine=fx80-alliant + basic_os= + ;; + altos | altos3068) + basic_machine=m68k-altos + basic_os= + ;; + am29k) + basic_machine=a29k-none + basic_os=bsd + ;; + amdahl) + basic_machine=580-amdahl + basic_os=sysv + ;; + amiga) + basic_machine=m68k-unknown + basic_os= + ;; + amigaos | amigados) + basic_machine=m68k-unknown + basic_os=amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + basic_os=sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + basic_os=sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + basic_os=bsd + ;; + aros) + basic_machine=i386-pc + basic_os=aros + ;; + aux) + basic_machine=m68k-apple + basic_os=aux + ;; + balance) + basic_machine=ns32k-sequent + basic_os=dynix + ;; + blackfin) + basic_machine=bfin-unknown + basic_os=linux + ;; + cegcc) + basic_machine=arm-unknown + basic_os=cegcc + ;; + convex-c1) + basic_machine=c1-convex + basic_os=bsd + ;; + convex-c2) + basic_machine=c2-convex + basic_os=bsd + ;; + convex-c32) + basic_machine=c32-convex + basic_os=bsd + ;; + convex-c34) + basic_machine=c34-convex + basic_os=bsd + ;; + convex-c38) + basic_machine=c38-convex + basic_os=bsd + ;; + cray) + basic_machine=j90-cray + basic_os=unicos + ;; + crds | unos) + basic_machine=m68k-crds + basic_os= + ;; + da30) + basic_machine=m68k-da30 + basic_os= + ;; + decstation | pmax | pmin | dec3100 | decstatn) + basic_machine=mips-dec + basic_os= + ;; + delta88) + basic_machine=m88k-motorola + basic_os=sysv3 + ;; + dicos) + basic_machine=i686-pc + basic_os=dicos + ;; + djgpp) + basic_machine=i586-pc + basic_os=msdosdjgpp + ;; + ebmon29k) + basic_machine=a29k-amd + basic_os=ebmon + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + basic_os=ose + ;; + gmicro) + basic_machine=tron-gmicro + basic_os=sysv + ;; + go32) + basic_machine=i386-pc + basic_os=go32 + ;; + h8300hms) + basic_machine=h8300-hitachi + basic_os=hms + ;; + h8300xray) + basic_machine=h8300-hitachi + basic_os=xray + ;; + h8500hms) + basic_machine=h8500-hitachi + basic_os=hms + ;; + harris) + basic_machine=m88k-harris + basic_os=sysv3 + ;; + hp300 | hp300hpux) + basic_machine=m68k-hp + basic_os=hpux + ;; + hp300bsd) + basic_machine=m68k-hp + basic_os=bsd + ;; + hppaosf) + basic_machine=hppa1.1-hp + basic_os=osf + ;; + hppro) + basic_machine=hppa1.1-hp + basic_os=proelf + ;; + i386mach) + basic_machine=i386-mach + basic_os=mach + ;; + isi68 | isi) + basic_machine=m68k-isi + basic_os=sysv + ;; + m68knommu) + basic_machine=m68k-unknown + basic_os=linux + ;; + magnum | m3230) + basic_machine=mips-mips + basic_os=sysv + ;; + merlin) + basic_machine=ns32k-utek + basic_os=sysv + ;; + mingw64) + basic_machine=x86_64-pc + basic_os=mingw64 + ;; + mingw32) + basic_machine=i686-pc + basic_os=mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + basic_os=mingw32ce + ;; + monitor) + basic_machine=m68k-rom68k + basic_os=coff + ;; + morphos) + basic_machine=powerpc-unknown + basic_os=morphos + ;; + moxiebox) + basic_machine=moxie-unknown + basic_os=moxiebox + ;; + msdos) + basic_machine=i386-pc + basic_os=msdos + ;; + msys) + basic_machine=i686-pc + basic_os=msys + ;; + mvs) + basic_machine=i370-ibm + basic_os=mvs + ;; + nacl) + basic_machine=le32-unknown + basic_os=nacl + ;; + ncr3000) + basic_machine=i486-ncr + basic_os=sysv4 + ;; + netbsd386) + basic_machine=i386-pc + basic_os=netbsd + ;; + 
netwinder) + basic_machine=armv4l-rebel + basic_os=linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + basic_os=newsos + ;; + news1000) + basic_machine=m68030-sony + basic_os=newsos + ;; + necv70) + basic_machine=v70-nec + basic_os=sysv + ;; + nh3000) + basic_machine=m68k-harris + basic_os=cxux + ;; + nh[45]000) + basic_machine=m88k-harris + basic_os=cxux + ;; + nindy960) + basic_machine=i960-intel + basic_os=nindy + ;; + mon960) + basic_machine=i960-intel + basic_os=mon960 + ;; + nonstopux) + basic_machine=mips-compaq + basic_os=nonstopux + ;; + os400) + basic_machine=powerpc-ibm + basic_os=os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + basic_os=ose + ;; + os68k) + basic_machine=m68k-none + basic_os=os68k + ;; + paragon) + basic_machine=i860-intel + basic_os=osf + ;; + parisc) + basic_machine=hppa-unknown + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp + ;; + pw32) + basic_machine=i586-unknown + basic_os=pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + basic_os=rdos + ;; + rdos32) + basic_machine=i386-pc + basic_os=rdos + ;; + rom68k) + basic_machine=m68k-rom68k + basic_os=coff + ;; + sa29200) + basic_machine=a29k-amd + basic_os=udi + ;; + sei) + basic_machine=mips-sei + basic_os=seiux + ;; + sequent) + basic_machine=i386-sequent + basic_os= + ;; + sps7) + basic_machine=m68k-bull + basic_os=sysv2 + ;; + st2000) + basic_machine=m68k-tandem + basic_os= + ;; + stratus) + basic_machine=i860-stratus + basic_os=sysv4 + ;; + sun2) + basic_machine=m68000-sun + basic_os= + ;; + sun2os3) + basic_machine=m68000-sun + basic_os=sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + basic_os=sunos4 + ;; + sun3) + basic_machine=m68k-sun + basic_os= + ;; + sun3os3) + basic_machine=m68k-sun + basic_os=sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + basic_os=sunos4 + ;; + sun4) + basic_machine=sparc-sun + basic_os= + ;; + sun4os3) + basic_machine=sparc-sun + basic_os=sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + basic_os=sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + basic_os=solaris2 + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + basic_os= + ;; + sv1) + basic_machine=sv1-cray + basic_os=unicos + ;; + symmetry) + basic_machine=i386-sequent + basic_os=dynix + ;; + t3e) + basic_machine=alphaev5-cray + basic_os=unicos + ;; + t90) + basic_machine=t90-cray + basic_os=unicos + ;; + toad1) + basic_machine=pdp10-xkl + basic_os=tops20 + ;; + tpf) + basic_machine=s390x-ibm + basic_os=tpf + ;; + udi29k) + basic_machine=a29k-amd + basic_os=udi + ;; + ultra3) + basic_machine=a29k-nyu + basic_os=sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + basic_os=none + ;; + vaxv) + basic_machine=vax-dec + basic_os=sysv + ;; + vms) + basic_machine=vax-dec + basic_os=vms + ;; + vsta) + basic_machine=i386-pc + basic_os=vsta + ;; + vxworks960) + basic_machine=i960-wrs + basic_os=vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + basic_os=vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + basic_os=vxworks + ;; + xbox) + basic_machine=i686-pc + basic_os=mingw32 + ;; + ymp) + basic_machine=ymp-cray + basic_os=unicos + ;; + *) + basic_machine=$1 + basic_os= + ;; + esac + ;; +esac + +# Decode 1-component or ad-hoc basic machines +case $basic_machine in + # Here we handle the default manufacturer of certain CPU types. It is in + # some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + cpu=hppa1.1 + vendor=winbond + ;; + op50n) + cpu=hppa1.1 + vendor=oki + ;; + op60c) + cpu=hppa1.1 + vendor=oki + ;; + ibm*) + cpu=i370 + vendor=ibm + ;; + orion105) + cpu=clipper + vendor=highlevel + ;; + mac | mpw | mac-mpw) + cpu=m68k + vendor=apple + ;; + pmac | pmac-mpw) + cpu=powerpc + vendor=apple + ;; + + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + cpu=m68000 + vendor=att + ;; + 3b*) + cpu=we32k + vendor=att + ;; + bluegene*) + cpu=powerpc + vendor=ibm + basic_os=cnk + ;; + decsystem10* | dec10*) + cpu=pdp10 + vendor=dec + basic_os=tops10 + ;; + decsystem20* | dec20*) + cpu=pdp10 + vendor=dec + basic_os=tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + cpu=m68k + vendor=motorola + ;; + dpx2*) + cpu=m68k + vendor=bull + basic_os=sysv3 + ;; + encore | umax | mmax) + cpu=ns32k + vendor=encore + ;; + elxsi) + cpu=elxsi + vendor=elxsi + basic_os=${basic_os:-bsd} + ;; + fx2800) + cpu=i860 + vendor=alliant + ;; + genix) + cpu=ns32k + vendor=ns + ;; + h3050r* | hiux*) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + cpu=m68000 + vendor=hp + ;; + hp9k3[2-9][0-9]) + cpu=m68k + vendor=hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + cpu=hppa1.1 + vendor=hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + cpu=hppa1.0 + vendor=hp + ;; + i*86v32) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv32 + ;; + i*86v4*) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv4 + ;; + i*86v) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=sysv + ;; + i*86sol2) + cpu=$(echo "$1" | sed -e 's/86.*/86/') + vendor=pc + basic_os=solaris2 + ;; + j90 | j90-cray) + cpu=j90 + vendor=cray + basic_os=${basic_os:-unicos} + ;; + iris | iris4d) + cpu=mips + vendor=sgi + case $basic_os in + irix*) + ;; + *) + basic_os=irix4 + ;; + esac + ;; + miniframe) + cpu=m68000 + vendor=convergent + ;; + *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) + cpu=m68k + vendor=atari + basic_os=mint + ;; + news-3600 | risc-news) + cpu=mips + vendor=sony + basic_os=newsos + ;; + next | m*-next) + cpu=m68k + vendor=next + case $basic_os in + openstep*) + ;; + nextstep*) + ;; + ns2*) + basic_os=nextstep2 + ;; + *) + basic_os=nextstep3 + ;; + esac + ;; + np1) + cpu=np1 + vendor=gould + ;; + op50n-* | op60c-*) + cpu=hppa1.1 + vendor=oki + basic_os=proelf + ;; + pa-hitachi) + cpu=hppa1.1 + vendor=hitachi + basic_os=hiuxwe2 + ;; + pbd) + cpu=sparc + vendor=tti + ;; + pbb) + cpu=m68k + vendor=tti + ;; + pc532) + cpu=ns32k + vendor=pc532 + ;; + pn) + cpu=pn + vendor=gould + ;; + power) + cpu=power + vendor=ibm + ;; + ps2) + cpu=i386 + vendor=ibm + ;; + rm[46]00) + cpu=mips + vendor=siemens + ;; + rtpc | rtpc-*) + cpu=romp + vendor=ibm + ;; + sde) + cpu=mipsisa32 + vendor=sde + basic_os=${basic_os:-elf} + ;; + simso-wrs) + cpu=sparclite + vendor=wrs + basic_os=vxworks + ;; + tower | tower-32) + cpu=m68k + vendor=ncr + ;; + vpp*|vx|vx-*) + cpu=f301 
+ vendor=fujitsu + ;; + w65) + cpu=w65 + vendor=wdc + ;; + w89k-*) + cpu=hppa1.1 + vendor=winbond + basic_os=proelf + ;; + none) + cpu=none + vendor=none + ;; + leon|leon[3-9]) + cpu=sparc + vendor=$basic_machine + ;; + leon-*|leon[3-9]-*) + cpu=sparc + vendor=$(echo "$basic_machine" | sed 's/-.*//') + ;; + + *-*) + # shellcheck disable=SC2162 + IFS="-" read cpu vendor <&2 + exit 1 + ;; + esac + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $vendor in + digital*) + vendor=dec + ;; + commodore*) + vendor=cbm + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if test x$basic_os != x +then + +# First recognize some ad-hoc caes, or perhaps split kernel-os, or else just +# set os. +case $basic_os in + gnu/linux*) + kernel=linux + os=$(echo $basic_os | sed -e 's|gnu/linux|gnu|') + ;; + os2-emx) + kernel=os2 + os=$(echo $basic_os | sed -e 's|os2-emx|emx|') + ;; + nto-qnx*) + kernel=nto + os=$(echo $basic_os | sed -e 's|nto-qnx|qnx|') + ;; + *-*) + # shellcheck disable=SC2162 + IFS="-" read kernel os <&2 + exit 1 + ;; +esac + +# As a final step for OS-related things, validate the OS-kernel combination +# (given a valid OS), if there is a kernel. +case $kernel-$os in + linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* ) + ;; + uclinux-uclibc* ) + ;; + -dietlibc* | -newlib* | -musl* | -uclibc* ) + # These are just libc implementations, not actual OSes, and thus + # require a kernel. + echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 + exit 1 + ;; + kfreebsd*-gnu* | kopensolaris*-gnu*) + ;; + nto-qnx*) + ;; + os2-emx) + ;; + *-eabi* | *-gnueabi*) + ;; + -*) + # Blank kernel with real OS is always fine. + ;; + *-*) + echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 + exit 1 + ;; +esac + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. +case $vendor in + unknown) + case $cpu-$os in + *-riscix*) + vendor=acorn + ;; + *-sunos*) + vendor=sun + ;; + *-cnk* | *-aix*) + vendor=ibm + ;; + *-beos*) + vendor=be + ;; + *-hpux*) + vendor=hp + ;; + *-mpeix*) + vendor=hp + ;; + *-hiux*) + vendor=hitachi + ;; + *-unos*) + vendor=crds + ;; + *-dgux*) + vendor=dg + ;; + *-luna*) + vendor=omron + ;; + *-genix*) + vendor=ns + ;; + *-clix*) + vendor=intergraph + ;; + *-mvs* | *-opened*) + vendor=ibm + ;; + *-os400*) + vendor=ibm + ;; + s390-* | s390x-*) + vendor=ibm + ;; + *-ptx*) + vendor=sequent + ;; + *-tpf*) + vendor=ibm + ;; + *-vxsim* | *-vxworks* | *-windiss*) + vendor=wrs + ;; + *-aux*) + vendor=apple + ;; + *-hms*) + vendor=hitachi + ;; + *-mpw* | *-macos*) + vendor=apple + ;; + *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) + vendor=atari + ;; + *-vos*) + vendor=stratus + ;; + esac + ;; +esac + +echo "$cpu-$vendor-${kernel:+$kernel-}$os" +exit + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/vendor/riscv-isa-sim/scripts/install.sh b/vendor/riscv-isa-sim/scripts/install.sh new file mode 100755 index 00000000..89fc9b09 --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/install.sh @@ -0,0 +1,238 @@ +#! /bin/sh +# +# install - install a program, script, or datafile +# This comes from X11R5. 
+# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. +# + + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit="${DOITPROG-}" + + +# put in absolute paths if you don't have them in your path; or use env. vars. + +mvprog="${MVPROG-mv}" +cpprog="${CPPROG-cp}" +chmodprog="${CHMODPROG-chmod}" +chownprog="${CHOWNPROG-chown}" +chgrpprog="${CHGRPPROG-chgrp}" +stripprog="${STRIPPROG-strip}" +rmprog="${RMPROG-rm}" +mkdirprog="${MKDIRPROG-mkdir}" + +tranformbasename="" +transform_arg="" +instcmd="$mvprog" +chmodcmd="$chmodprog 0755" +chowncmd="" +chgrpcmd="" +stripcmd="" +rmcmd="$rmprog -f" +mvcmd="$mvprog" +src="" +dst="" +dir_arg="" + +while [ x"$1" != x ]; do + case $1 in + -c) instcmd="$cpprog" + shift + continue;; + + -d) dir_arg=true + shift + continue;; + + -m) chmodcmd="$chmodprog $2" + shift + shift + continue;; + + -o) chowncmd="$chownprog $2" + shift + shift + continue;; + + -g) chgrpcmd="$chgrpprog $2" + shift + shift + continue;; + + -s) stripcmd="$stripprog" + shift + continue;; + + -t=*) transformarg=`echo $1 | sed 's/-t=//'` + shift + continue;; + + -b=*) transformbasename=`echo $1 | sed 's/-b=//'` + shift + continue;; + + *) if [ x"$src" = x ] + then + src=$1 + else + # this colon is to work around a 386BSD /bin/sh bug + : + dst=$1 + fi + shift + continue;; + esac +done + +if [ x"$src" = x ] +then + echo "install: no input file specified" + exit 1 +else + true +fi + +if [ x"$dir_arg" != x ]; then + dst=$src + src="" + + if [ -d $dst ]; then + instcmd=: + else + instcmd=mkdir + fi +else + +# Waiting for this to be detected by the "$instcmd $src $dsttmp" command +# might cause directories to be created, which would be especially bad +# if $src (and thus $dsttmp) contains '*'. + + if [ -f $src -o -d $src ] + then + true + else + echo "install: $src does not exist" + exit 1 + fi + + if [ x"$dst" = x ] + then + echo "install: no destination specified" + exit 1 + else + true + fi + +# If destination is a directory, append the input filename; if your system +# does not like double slashes in filenames, you may need to add some logic + + if [ -d $dst ] + then + dst="$dst"/`basename $src` + else + true + fi +fi + +## this sed command emulates the dirname command +dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` + +# Make sure that the destination directory exists. +# this part is taken from Noah Friedman's mkinstalldirs script + +# Skip lots of stat calls in the usual case. +if [ ! -d "$dstdir" ]; then +defaultIFS=' +' +IFS="${IFS-${defaultIFS}}" + +oIFS="${IFS}" +# Some sh's can't handle IFS=/ for some reason. +IFS='%' +set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` +IFS="${oIFS}" + +pathcomp='' + +while [ $# -ne 0 ] ; do + pathcomp="${pathcomp}${1}" + shift + + if [ ! 
-d "${pathcomp}" ] ; + then + $mkdirprog "${pathcomp}" + else + true + fi + + pathcomp="${pathcomp}/" +done +fi + +if [ x"$dir_arg" != x ] +then + $doit $instcmd $dst && + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi +else + +# If we're going to rename the final executable, determine the name now. + + if [ x"$transformarg" = x ] + then + dstfile=`basename $dst` + else + dstfile=`basename $dst $transformbasename | + sed $transformarg`$transformbasename + fi + +# don't allow the sed command to completely eliminate the filename + + if [ x"$dstfile" = x ] + then + dstfile=`basename $dst` + else + true + fi + +# Make a temp file name in the proper directory. + + dsttmp=$dstdir/#inst.$$# + +# Move or copy the file name to the temp name + + $doit $instcmd $src $dsttmp && + + trap "rm -f ${dsttmp}" 0 && + +# and set any options; do chmod last to preserve setuid bits + +# If any of these fail, we abort the whole thing. If we want to +# ignore errors from any of these, just make sure not to ignore +# errors from the above "$doit $instcmd $src $dsttmp" command. + + if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && + if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && + if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && + if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && + +# Now rename the file to the real destination. + + $doit $rmcmd -f $dstdir/$dstfile && + $doit $mvcmd $dsttmp $dstdir/$dstfile + +fi && + + +exit 0 diff --git a/vendor/riscv-isa-sim/scripts/mk-install-dirs.sh b/vendor/riscv-isa-sim/scripts/mk-install-dirs.sh new file mode 100755 index 00000000..644b5f72 --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/mk-install-dirs.sh @@ -0,0 +1,40 @@ +#! /bin/sh +# mkinstalldirs --- make directory hierarchy +# Author: Noah Friedman +# Created: 1993-05-16 +# Public domain + +# $Id: mkinstalldirs,v 1.1 2003/09/09 22:24:03 mhampton Exp $ + +errstatus=0 + +for file +do + set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` + shift + + pathcomp= + for d + do + pathcomp="$pathcomp$d" + case "$pathcomp" in + -* ) pathcomp=./$pathcomp ;; + esac + + if test ! -d "$pathcomp"; then + echo "mkdir $pathcomp" 1>&2 + + mkdir "$pathcomp" || lasterr=$? + + if test ! -d "$pathcomp"; then + errstatus=$lasterr + fi + fi + + pathcomp="$pathcomp/" + done +done + +exit $errstatus + +# mkinstalldirs ends here diff --git a/vendor/riscv-isa-sim/scripts/vcs-version.sh b/vendor/riscv-isa-sim/scripts/vcs-version.sh new file mode 100755 index 00000000..692c071e --- /dev/null +++ b/vendor/riscv-isa-sim/scripts/vcs-version.sh @@ -0,0 +1,117 @@ +#!/usr/bin/env bash +#========================================================================= +# vcs-version.sh [options] [src-dir] +#========================================================================= +# +# -h Display this message +# -v Verbose mode +# +# This script will create a version string by querying a version control +# system. The string is appropriate for use in installations and +# distributions. Currently this script assumes we are using git as our +# version control system but it would be possible to check and see if we +# are using an alternative version control system and create a version +# string appropriately. 
+# +# The script uses git describe plus a few other git commands to create a +# version strings in the following format: +# +# X.Y[-Z-gN][-dirty] +# +# where X is the major release, Y is the minor release, Z is the number +# of commits since the X.Y release, N is an eight digit abbreviated SHA +# hash of the most recent commit and the dirty suffix is appended when +# the working directory used to create the installation or distribution +# is not a pristine checkout. Here are some example version strings: +# +# 0.0 : initial import +# 0.0-3-g99ef6933 : 3rd commit since initial import (N=99ef6933) +# 1.0 : release 1.0 +# 1.1-12-g3487ab12 : 12th commit since release 1.1 (N=3487ab12) +# 1.1-12-g3487ab12-dirty : 12th commit since release 1.1 (N=3487ab12) +# +# The last example is from a dirty working directory. To find the last +# release, the script looks for the last tag (does not need to be an +# annotated tag, but probably should be) which matches the format rel-*. +# If there is no such tag in the history, then the script uses 0.0 as +# the release number and counts the total number of commits since the +# original import for the commit count. +# +# If the current directory is not within the working directory, then the +# path to the source directory should be supplied on the command line. +# +# Author : Christopher Batten +# Date : August 5, 2009 + +set -e + +#------------------------------------------------------------------------- +# Command line parsing +#------------------------------------------------------------------------- + +if ( test "$1" = "-h" ); then + echo "" + sed -n '3p' $0 | sed -e 's/#//' + sed -n '5,/^$/p' $0 | sed -e 's/#//' + exit 1 +fi + +# Source directory command line option + +src_dir="." +if ( test -n "$1" ); then + src_dir="$1" +fi + +#------------------------------------------------------------------------- +# Verify source directory +#------------------------------------------------------------------------- +# If the source directory is not a git working directory output a +# question mark. A distribution will not be in a working directory, but +# the build system should be structured such that this script is not +# executed (and instead the version information should probably come +# from configure). If the user does not specify a source directory use +# the current directory. + +if !( git rev-parse --is-inside-work-tree &> /dev/null ); then + echo "?" 
+ exit 1; +fi + +top_dir=`git rev-parse --show-cdup` +cd ./${top_dir} + +#------------------------------------------------------------------------- +# Create the version string +#------------------------------------------------------------------------- +# See if we can do a describe based on a tag and if not use a default +# release number of 0.0 so that we always get canonical version number + +if ( git describe --tags --match "rel-*" &> /dev/null ); then + ver_str=`git describe --tags --match "rel-*" | sed 's/rel-//'` +else + ver_num="0.0" + ver_commits=`git rev-list --all | wc -l | tr -d " "` + ver_sha=`git describe --tags --match "rel-*" --always` + ver_str="${ver_num}-${ver_commits}-g${ver_sha}" +fi + +# Add a dirty suffix if working directory is dirty + +if !( git diff --quiet ); then + ver_str="${ver_str}-dirty" +else + untracked=`git ls-files --directory --exclude-standard --others -t` + if ( test -n "${untracked}" ); then + ver_str="${ver_str}-dirty" + fi +fi + +# Output the final version string + +echo "${ver_str}" + +# Final exit status + +exit 0; + diff --git a/vendor/riscv-isa-sim/softfloat/f128_add.c b/vendor/riscv-isa-sim/softfloat/f128_add.c new file mode 100644 index 00000000..6568ab6f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_add.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t f128_add( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; +#if ! 
defined INLINE_LEVEL || (INLINE_LEVEL < 2) + float128_t + (*magsFuncPtr)( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +#endif + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) + if ( signA == signB ) { + return softfloat_addMagsF128( uiA64, uiA0, uiB64, uiB0, signA ); + } else { + return softfloat_subMagsF128( uiA64, uiA0, uiB64, uiB0, signA ); + } +#else + magsFuncPtr = + (signA == signB) ? softfloat_addMagsF128 : softfloat_subMagsF128; + return (*magsFuncPtr)( uiA64, uiA0, uiB64, uiB0, signA ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_classify.c b/vendor/riscv-isa-sim/softfloat/f128_classify.c new file mode 100755 index 00000000..1092a9b5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_classify.c @@ -0,0 +1,37 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f128_classify( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + + uint_fast16_t infOrNaN = expF128UI64( uiA64 ) == 0x7FFF; + uint_fast16_t subnormalOrZero = expF128UI64( uiA64 ) == 0; + bool sign = signF128UI64( uiA64 ); + bool fracZero = fracF128UI64( uiA64 ) == 0 && uiA0 == 0; + bool isNaN = isNaNF128UI( uiA64, uiA0 ); + bool isSNaN = softfloat_isSigNaNF128UI( uiA64, uiA0 ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_div.c b/vendor/riscv-isa-sim/softfloat/f128_div.c new file mode 100644 index 00000000..9384e756 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_div.c @@ -0,0 +1,199 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_div( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; + int_fast32_t expB; + struct uint128 sigB; + bool signZ; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + struct uint128 rem; + uint_fast32_t recip32; + int ix; + uint_fast64_t q64; + uint_fast32_t q; + struct uint128 term; + uint_fast32_t qs[3]; + uint_fast64_t sigZExtra; + struct uint128 sigZ, uiZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) goto propagateNaN; + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! (sigB.v64 | sigB.v0) ) { + if ( ! (expA | sigA.v64 | sigA.v0) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
(sigA.v64 | sigA.v0) ) goto zero; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x3FFE; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + rem = sigA; + if ( softfloat_lt128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ) ) { + --expZ; + rem = softfloat_add128( sigA.v64, sigA.v0, sigA.v64, sigA.v0 ); + } + recip32 = softfloat_approxRecip32_1( sigB.v64>>17 ); + ix = 3; + for (;;) { + q64 = (uint_fast64_t) (uint32_t) (rem.v64>>19) * recip32; + q = (q64 + 0x80000000)>>32; + --ix; + if ( ix < 0 ) break; + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + --q; + rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + qs[ix] = q; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ((q + 1) & 7) < 2 ) { + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + --q; + rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } else if ( softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 ) ) { + ++q; + rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + if ( rem.v64 | rem.v0 ) q |= 1; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZExtra = (uint64_t) ((uint_fast64_t) q<<60); + term = softfloat_shortShiftLeft128( 0, qs[1], 54 ); + sigZ = + softfloat_add128( + (uint_fast64_t) qs[2]<<19, ((uint_fast64_t) qs[0]<<25) + (q>>4), + term.v64, term.v0 + ); + return + softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + goto uiZ0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ.v64 = packToF128UI64( signZ, 0, 0 ); + uiZ0: + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_eq.c b/vendor/riscv-isa-sim/softfloat/f128_eq.c new file mode 100644 index 00000000..a0e1ad28 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_eq.c @@ -0,0 +1,73 @@ + 
+/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_eq( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return + (uiA0 == uiB0) + && ( (uiA64 == uiB64) + || (! uiA0 && ! ((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c new file mode 100644 index 00000000..bd37b979 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_eq_signaling.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f128_eq_signaling( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return + (uiA0 == uiB0) + && ( (uiA64 == uiB64) + || (! uiA0 && ! ((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c new file mode 100644 index 00000000..fced58e5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_isSignalingNaN( float128_t a ) +{ + union ui128_f128 uA; + + uA.f = a; + return softfloat_isSigNaNF128UI( uA.ui.v64, uA.ui.v0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_le.c b/vendor/riscv-isa-sim/softfloat/f128_le.c new file mode 100644 index 00000000..9b0aa234 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_le.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f128_le( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + || ! 
(((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 == uiB64) && (uiA0 == uiB0)) + || (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f128_le_quiet.c new file mode 100644 index 00000000..3b440388 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_le_quiet.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_le_quiet( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + || ! 
(((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 == uiB64) && (uiA0 == uiB0)) + || (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_lt.c b/vendor/riscv-isa-sim/softfloat/f128_lt.c new file mode 100644 index 00000000..a28f95b7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_lt.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f128_lt( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + && (((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 != uiB64) || (uiA0 != uiB0)) + && (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c new file mode 100644 index 00000000..20146ee4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_lt_quiet.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. 
+All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f128_lt_quiet( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signA, signB; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + if ( isNaNF128UI( uiA64, uiA0 ) || isNaNF128UI( uiB64, uiB0 ) ) { + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF128UI64( uiA64 ); + signB = signF128UI64( uiB64 ); + return + (signA != signB) + ? signA + && (((uiA64 | uiB64) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + | uiA0 | uiB0) + : ((uiA64 != uiB64) || (uiA0 != uiB0)) + && (signA ^ softfloat_lt128( uiA64, uiA0, uiB64, uiB0 )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_mul.c b/vendor/riscv-isa-sim/softfloat/f128_mul.c new file mode 100644 index 00000000..18716139 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_mul.c @@ -0,0 +1,163 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_mul( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; + int_fast32_t expB; + struct uint128 sigB; + bool signZ; + uint_fast64_t magBits; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + uint64_t sig256Z[4]; + uint_fast64_t sigZExtra; + struct uint128 sigZ; + struct uint128_extra sig128Extra; + struct uint128 uiZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( + (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) + ) { + goto propagateNaN; + } + magBits = expB | sigB.v64 | sigB.v0; + goto infArg; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + magBits = expA | sigA.v64 | sigA.v0; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) goto zero; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! 
(sigB.v64 | sigB.v0) ) goto zero; + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x4000; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 16 ); + softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z ); + sigZExtra = sig256Z[indexWord( 4, 1 )] | (sig256Z[indexWord( 4, 0 )] != 0); + sigZ = + softfloat_add128( + sig256Z[indexWord( 4, 3 )], sig256Z[indexWord( 4, 2 )], + sigA.v64, sigA.v0 + ); + if ( UINT64_C( 0x0002000000000000 ) <= sigZ.v64 ) { + ++expZ; + sig128Extra = + softfloat_shortShiftRightJam128Extra( + sigZ.v64, sigZ.v0, sigZExtra, 1 ); + sigZ = sig128Extra.v; + sigZExtra = sig128Extra.extra; + } + return + softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + goto uiZ; + } + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + goto uiZ0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ.v64 = packToF128UI64( signZ, 0, 0 ); + uiZ0: + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f128_mulAdd.c new file mode 100644 index 00000000..b2e2142f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_mulAdd.c @@ -0,0 +1,63 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t f128_mulAdd( float128_t a, float128_t b, float128_t c ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + union ui128_f128 uC; + uint_fast64_t uiC64, uiC0; + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + uC.f = c; + uiC64 = uC.ui.v64; + uiC0 = uC.ui.v0; + return softfloat_mulAddF128( uiA64, uiA0, uiB64, uiB0, uiC64, uiC0, 0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_rem.c b/vendor/riscv-isa-sim/softfloat/f128_rem.c new file mode 100644 index 00000000..555d71eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_rem.c @@ -0,0 +1,190 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_rem( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + int_fast32_t expB; + struct uint128 sigB; + struct exp32_sig128 normExpSig; + struct uint128 rem; + int_fast32_t expDiff; + uint_fast32_t q, recip32; + uint_fast64_t q64; + struct uint128 term, altRem, meanRem; + bool signRem; + struct uint128 uiZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( + (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) + ) { + goto propagateNaN; + } + goto invalid; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! (sigB.v64 | sigB.v0) ) goto invalid; + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) return a; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + rem = sigA; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + if ( expDiff ) { + --expB; + sigB = softfloat_add128( sigB.v64, sigB.v0, sigB.v64, sigB.v0 ); + q = 0; + } else { + q = softfloat_le128( sigB.v64, sigB.v0, rem.v64, rem.v0 ); + if ( q ) { + rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + } + } else { + recip32 = softfloat_approxRecip32_1( sigB.v64>>17 ); + expDiff -= 30; + for (;;) { + q64 = (uint_fast64_t) (uint32_t) (rem.v64>>19) * recip32; + if ( expDiff < 0 ) break; + q = (q64 + 0x80000000)>>32; + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + rem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -29 here.) 
+ *--------------------------------------------------------------------*/ + q = (uint32_t) (q64>>32)>>(~expDiff & 31); + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, expDiff + 30 ); + term = softfloat_mul128By32( sigB.v64, sigB.v0, q ); + rem = softfloat_sub128( rem.v64, rem.v0, term.v64, term.v0 ); + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + altRem = softfloat_add128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + goto selectRem; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem = softfloat_sub128( rem.v64, rem.v0, sigB.v64, sigB.v0 ); + } while ( ! (rem.v64 & UINT64_C( 0x8000000000000000 )) ); + selectRem: + meanRem = softfloat_add128( rem.v64, rem.v0, altRem.v64, altRem.v0 ); + if ( + (meanRem.v64 & UINT64_C( 0x8000000000000000 )) + || (! (meanRem.v64 | meanRem.v0) && (q & 1)) + ) { + rem = altRem; + } + signRem = signA; + if ( rem.v64 & UINT64_C( 0x8000000000000000 ) ) { + signRem = ! signRem; + rem = softfloat_sub128( 0, 0, rem.v64, rem.v0 ); + } + return softfloat_normRoundPackToF128( signRem, expB - 1, rem.v64, rem.v0 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f128_roundToInt.c new file mode 100644 index 00000000..0f1f07ec --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_roundToInt.c @@ -0,0 +1,160 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t + f128_roundToInt( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + int_fast32_t exp; + struct uint128 uiZ; + uint_fast64_t lastBitMask, roundBitsMask; + bool roundNearEven; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + exp = expF128UI64( uiA64 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x402F <= exp ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( 0x406F <= exp ) { + if ( (exp == 0x7FFF) && (fracF128UI64( uiA64 ) | uiA0) ) { + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, 0, 0 ); + goto uiZ; + } + return a; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + lastBitMask = (uint_fast64_t) 2<<(0x406E - exp); + roundBitsMask = lastBitMask - 1; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + roundNearEven = (roundingMode == softfloat_round_near_even); + if ( roundNearEven || (roundingMode == softfloat_round_near_maxMag) ) { + if ( exp == 0x402F ) { + if ( UINT64_C( 0x8000000000000000 ) <= uiZ.v0 ) { + ++uiZ.v64; + if ( + roundNearEven + && (uiZ.v0 == UINT64_C( 0x8000000000000000 )) + ) { + uiZ.v64 &= ~1; + } + } + } else { + uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, lastBitMask>>1 ); + if ( roundNearEven && ! (uiZ.v0 & roundBitsMask) ) { + uiZ.v0 &= ~lastBitMask; + } + } + } else if ( + roundingMode + == (signF128UI64( uiZ.v64 ) ? softfloat_round_min + : softfloat_round_max) + ) { + uiZ = softfloat_add128( uiZ.v64, uiZ.v0, 0, roundBitsMask ); + } + uiZ.v0 &= ~roundBitsMask; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( exp < 0x3FFF ) { + if ( ! ((uiA64 & UINT64_C( 0x7FFFFFFFFFFFFFFF )) | uiA0) ) { + return a; + } + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ.v64 = uiA64 & packToF128UI64( 1, 0, 0 ); + uiZ.v0 = 0; + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! (fracF128UI64( uiA64 ) | uiA0) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0x3FFE ) uiZ.v64 |= packToF128UI64( 0, 0x3FFF, 0 ); + break; + case softfloat_round_min: + if ( uiZ.v64 ) uiZ.v64 = packToF128UI64( 1, 0x3FFF, 0 ); + break; + case softfloat_round_max: + if ( ! 
uiZ.v64 ) uiZ.v64 = packToF128UI64( 0, 0x3FFF, 0 ); + break; + } + goto uiZ; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + uiZ.v64 = uiA64; + uiZ.v0 = 0; + lastBitMask = (uint_fast64_t) 1<<(0x402F - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ.v64 += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ.v64 += lastBitMask>>1; + if ( ! ((uiZ.v64 & roundBitsMask) | uiA0) ) { + uiZ.v64 &= ~lastBitMask; + } + } else if ( + roundingMode + == (signF128UI64( uiZ.v64 ) ? softfloat_round_min + : softfloat_round_max) + ) { + uiZ.v64 = (uiZ.v64 | (uiA0 != 0)) + roundBitsMask; + } + uiZ.v64 &= ~roundBitsMask; + } + if ( exact && ((uiZ.v64 != uiA64) || (uiZ.v0 != uiA0)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_sqrt.c b/vendor/riscv-isa-sim/softfloat/f128_sqrt.c new file mode 100644 index 00000000..5b99694e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_sqrt.c @@ -0,0 +1,201 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
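For reference, a minimal standalone sketch (not part of the vendored sources) of how the f128_roundToInt entry point above is typically driven; i64_to_f128, f128_div, f128_to_i64, the softfloat_round_* constants, and softfloat_exceptionFlags are all declared by the package's softfloat.h, while main() here is purely illustrative:

#include <stdio.h>
#include <stdbool.h>
#include "softfloat.h"

int main(void)
{
    /* 7/2 = 3.5 in binary128, rounded to an integral value two ways. */
    float128_t x = f128_div( i64_to_f128( 7 ), i64_to_f128( 2 ) );

    softfloat_exceptionFlags = 0;
    float128_t nearest = f128_roundToInt( x, softfloat_round_near_even, true );
    float128_t trunc   = f128_roundToInt( x, softfloat_round_minMag,   true );
    bool inexact = (softfloat_exceptionFlags & softfloat_flag_inexact) != 0;

    /* Expect 4 (ties-to-even) and 3 (toward zero), with inexact raised. */
    printf( "near_even=%lld minMag=%lld inexact=%d\n",
            (long long) f128_to_i64( nearest, softfloat_round_minMag, false ),
            (long long) f128_to_i64( trunc,   softfloat_round_minMag, false ),
            (int) inexact );
    return 0;
}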
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f128_sqrt( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + int_fast32_t expA; + struct uint128 sigA, uiZ; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + uint_fast32_t sig32A, recipSqrt32, sig32Z; + struct uint128 rem; + uint32_t qs[3]; + uint_fast32_t q; + uint_fast64_t x64, sig64Z; + struct uint128 y, term; + uint_fast64_t sigZExtra; + struct uint128 sigZ; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) { + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, 0, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA.v64 | sigA.v0) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) return a; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FFF)>>1) + 0x3FFE; + expA &= 1; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sig32A = sigA.v64>>17; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sig32Z >>= 1; + rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 12 ); + } else { + rem = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 13 ); + } + qs[2] = sig32Z; + rem.v64 -= (uint_fast64_t) sig32Z * sig32Z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + q = ((uint32_t) (rem.v64>>2) * (uint_fast64_t) recipSqrt32)>>32; + x64 = (uint_fast64_t) sig32Z<<32; + sig64Z = x64 + ((uint_fast64_t) q<<3); + y = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + /*------------------------------------------------------------------------ + | (Repeating this loop is a rare occurrence.) + *------------------------------------------------------------------------*/ + for (;;) { + term = softfloat_mul64ByShifted32To128( x64 + sig64Z, q ); + rem = softfloat_sub128( y.v64, y.v0, term.v64, term.v0 ); + if ( ! 
(rem.v64 & UINT64_C( 0x8000000000000000 )) ) break; + --q; + sig64Z -= 1<<3; + } + qs[1] = q; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + q = ((rem.v64>>2) * recipSqrt32)>>32; + y = softfloat_shortShiftLeft128( rem.v64, rem.v0, 29 ); + sig64Z <<= 1; + /*------------------------------------------------------------------------ + | (Repeating this loop is a rare occurrence.) + *------------------------------------------------------------------------*/ + for (;;) { + term = softfloat_shortShiftLeft128( 0, sig64Z, 32 ); + term = softfloat_add128( term.v64, term.v0, 0, (uint_fast64_t) q<<6 ); + term = softfloat_mul128By32( term.v64, term.v0, q ); + rem = softfloat_sub128( y.v64, y.v0, term.v64, term.v0 ); + if ( ! (rem.v64 & UINT64_C( 0x8000000000000000 )) ) break; + --q; + } + qs[0] = q; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + q = (((rem.v64>>2) * recipSqrt32)>>32) + 2; + sigZExtra = (uint64_t) ((uint_fast64_t) q<<59); + term = softfloat_shortShiftLeft128( 0, qs[1], 53 ); + sigZ = + softfloat_add128( + (uint_fast64_t) qs[2]<<18, ((uint_fast64_t) qs[0]<<24) + (q>>5), + term.v64, term.v0 + ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (q & 0xF) <= 2 ) { + q &= ~3; + sigZExtra = (uint64_t) ((uint_fast64_t) q<<59); + y = softfloat_shortShiftLeft128( sigZ.v64, sigZ.v0, 6 ); + y.v0 |= sigZExtra>>58; + term = softfloat_sub128( y.v64, y.v0, 0, q ); + y = softfloat_mul64ByShifted32To128( term.v0, q ); + term = softfloat_mul64ByShifted32To128( term.v64, q ); + term = softfloat_add128( term.v64, term.v0, 0, y.v64 ); + rem = softfloat_shortShiftLeft128( rem.v64, rem.v0, 20 ); + term = softfloat_sub128( term.v64, term.v0, rem.v64, rem.v0 ); + /*-------------------------------------------------------------------- + | The concatenation of `term' and `y.v0' is now the negative remainder + | (3 words altogether). + *--------------------------------------------------------------------*/ + if ( term.v64 & UINT64_C( 0x8000000000000000 ) ) { + sigZExtra |= 1; + } else { + if ( term.v64 | term.v0 | y.v0 ) { + if ( sigZExtra ) { + --sigZExtra; + } else { + sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, 0, 1 ); + sigZExtra = ~0; + } + } + } + } + return softfloat_roundPackToF128( 0, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_sub.c b/vendor/riscv-isa-sim/softfloat/f128_sub.c new file mode 100644 index 00000000..ce2e5adb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_sub.c @@ -0,0 +1,78 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t f128_sub( float128_t a, float128_t b ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool signA; + union ui128_f128 uB; + uint_fast64_t uiB64, uiB0; + bool signB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2) + float128_t + (*magsFuncPtr)( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +#endif + + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + signA = signF128UI64( uiA64 ); + uB.f = b; + uiB64 = uB.ui.v64; + uiB0 = uB.ui.v0; + signB = signF128UI64( uiB64 ); +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) + if ( signA == signB ) { + return softfloat_subMagsF128( uiA64, uiA0, uiB64, uiB0, signA ); + } else { + return softfloat_addMagsF128( uiA64, uiA0, uiB64, uiB0, signA ); + } +#else + magsFuncPtr = + (signA == signB) ? softfloat_subMagsF128 : softfloat_addMagsF128; + return (*magsFuncPtr)( uiA64, uiA0, uiB64, uiB0, signA ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_f16.c b/vendor/riscv-isa-sim/softfloat/f128_to_f16.c new file mode 100644 index 00000000..a910c12a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_f16.c @@ -0,0 +1,95 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f128_to_f16( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t frac64; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + frac64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FFF ) { + if ( frac64 ) { + softfloat_f128UIToCommonNaN( uiA64, uiA0, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = softfloat_shortShiftRightJam64( frac64, 34 ); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + exp -= 0x3FF1; + if ( sizeof (int_fast16_t) < sizeof (int_fast32_t) ) { + if ( exp < -0x40 ) exp = -0x40; + } + return softfloat_roundPackToF16( sign, exp, frac16 | 0x4000 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_f32.c b/vendor/riscv-isa-sim/softfloat/f128_to_f32.c new file mode 100644 index 00000000..d890d3eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_f32.c @@ -0,0 +1,95 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f128_to_f32( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t frac64; + struct commonNaN commonNaN; + uint_fast32_t uiZ, frac32; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + frac64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FFF ) { + if ( frac64 ) { + softfloat_f128UIToCommonNaN( uiA64, uiA0, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac32 = softfloat_shortShiftRightJam64( frac64, 18 ); + if ( ! 
(exp | frac32) ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + exp -= 0x3F81; + if ( sizeof (int_fast16_t) < sizeof (int_fast32_t) ) { + if ( exp < -0x1000 ) exp = -0x1000; + } + return softfloat_roundPackToF32( sign, exp, frac32 | 0x40000000 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_f64.c b/vendor/riscv-isa-sim/softfloat/f128_to_f64.c new file mode 100644 index 00000000..e7aec201 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_f64.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f128_to_f64( float128_t a ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t frac64, frac0; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct uint128 frac128; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + frac64 = fracF128UI64( uiA64 ); + frac0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FFF ) { + if ( frac64 | frac0 ) { + softfloat_f128UIToCommonNaN( uiA64, uiA0, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac128 = softfloat_shortShiftLeft128( frac64, frac0, 14 ); + frac64 = frac128.v64 | (frac128.v0 != 0); + if ( ! (exp | frac64) ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + exp -= 0x3C01; + if ( sizeof (int_fast16_t) < sizeof (int_fast32_t) ) { + if ( exp < -0x1000 ) exp = -0x1000; + } + return + softfloat_roundPackToF64( + sign, exp, frac64 | UINT64_C( 0x4000000000000000 ) ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_i32.c b/vendor/riscv-isa-sim/softfloat/f128_to_i32.c new file mode 100644 index 00000000..507691cc --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_i32.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f128_to_i32( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0x7FFF) && (sig64 | sig0) ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + sig64 |= (sig0 != 0); + shiftDist = 0x4023 - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c new file mode 100644 index 00000000..fc9f84f1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_i32_r_minMag.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
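A short standalone sketch (again not part of the sources) of the overflow and NaN behaviour of the f128_to_i32 conversion above: on out-of-range or NaN input the invalid flag is raised and the result is whichever saturation constant specialize.h selects for this port. i64_to_f128 and the flag names come from softfloat.h; main() is illustrative only.

#include <stdio.h>
#include <stdbool.h>
#include "softfloat.h"

int main(void)
{
    /* 2^40 is far outside int32_t range. */
    float128_t big = i64_to_f128( (int_fast64_t) 1 << 40 );

    softfloat_exceptionFlags = 0;
    int_fast32_t r = f128_to_i32( big, softfloat_round_near_even, true );

    /* invalid is raised; r is the positive-overflow value from specialize.h. */
    printf( "result=%ld invalid=%d\n", (long) r,
            (softfloat_exceptionFlags & softfloat_flag_invalid) != 0 );
    return 0;
}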
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f128_to_i32_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + int_fast32_t exp; + uint_fast64_t sig64; + int_fast32_t shiftDist; + bool sign; + int_fast32_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( 49 <= shiftDist ) { + if ( exact && (exp | sig64) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF128UI64( uiA64 ); + if ( shiftDist < 18 ) { + if ( + sign && (shiftDist == 17) + && (sig64 < UINT64_C( 0x0000000000020000 )) + ) { + if ( exact && sig64 ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return -0x7FFFFFFF - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && sig64 ? i32_fromNaN + : sign ? 
i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + absZ = sig64>>shiftDist; + if ( + exact && ((uint_fast64_t) (uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f128_to_i64( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + struct uint128 sig128; + struct uint64_extra sigExtra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -15 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && (sig64 | sig0) ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + if ( shiftDist ) { + sig128 = softfloat_shortShiftLeft128( sig64, sig0, -shiftDist ); + sig64 = sig128.v64; + sig0 = sig128.v0; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + sigExtra = softfloat_shiftRightJam64Extra( sig64, sig0, shiftDist ); + sig64 = sigExtra.v; + sig0 = sigExtra.extra; + } + return softfloat_roundToI64( sign, sig64, sig0, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c new file mode 100644 index 00000000..7e0d63da --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_i64_r_minMag.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f128_to_i64_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + int_fast8_t negShiftDist; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist < 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -14 ) { + if ( + (uiA64 == UINT64_C( 0xC03E000000000000 )) + && (sig0 < UINT64_C( 0x0002000000000000 )) + ) { + if ( exact && sig0 ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && (sig64 | sig0) ? i64_fromNaN + : sign ? 
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + negShiftDist = -shiftDist; + absZ = sig64<>(shiftDist & 63); + if ( exact && (uint64_t) (sig0<>shiftDist; + if ( exact && (sig0 || (absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + f128_to_ui32( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64; + int_fast32_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0x7FFF) && sig64 ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + shiftDist = 0x4023 - exp; + if ( 0 < shiftDist ) { + sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + } + return softfloat_roundToUI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c new file mode 100644 index 00000000..2097fb81 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_ui32_r_minMag.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f128_to_ui32_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + int_fast32_t exp; + uint_fast64_t sig64; + int_fast32_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ) | (uiA0 != 0); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( 49 <= shiftDist ) { + if ( exact && (exp | sig64) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF128UI64( uiA64 ); + if ( sign || (shiftDist < 17) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && sig64 ? ui32_fromNaN + : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + z = sig64>>shiftDist; + if ( exact && ((uint_fast64_t) z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + f128_to_ui64( float128_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + struct uint128 sig128; + struct uint64_extra sigExtra; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -15 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FFF) && (sig64 | sig0) ? ui64_fromNaN + : sign ? 
ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64 |= UINT64_C( 0x0001000000000000 ); + if ( shiftDist ) { + sig128 = softfloat_shortShiftLeft128( sig64, sig0, -shiftDist ); + sig64 = sig128.v64; + sig0 = sig128.v0; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( exp ) sig64 |= UINT64_C( 0x0001000000000000 ); + sigExtra = softfloat_shiftRightJam64Extra( sig64, sig0, shiftDist ); + sig64 = sigExtra.v; + sig0 = sigExtra.extra; + } + return softfloat_roundToUI64( sign, sig64, sig0, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c new file mode 100644 index 00000000..fb16320a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f128_to_ui64_r_minMag.c @@ -0,0 +1,105 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
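The f128_to_ui64_r_minMag routine added below, like the other *_r_minMag entry points in this group, is intended as a specialization of the general conversion for round-toward-zero only. A hedged sketch of that intended equivalence (hypothetical helper names, standalone, not part of the sources):

#include <stdbool.h>
#include "softfloat.h"

/* Truncating binary128 -> uint64 conversion, written two ways.  The
   general routine is driven with softfloat_round_minMag; the _r_minMag
   form hard-codes that rounding and skips the shared rounding helper. */
static uint_fast64_t trunc_general( float128_t a, bool exact )
{
    return f128_to_ui64( a, softfloat_round_minMag, exact );
}

static uint_fast64_t trunc_specialized( float128_t a, bool exact )
{
    return f128_to_ui64_r_minMag( a, exact );
}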
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f128_to_ui64_r_minMag( float128_t a, bool exact ) +{ + union ui128_f128 uA; + uint_fast64_t uiA64, uiA0; + bool sign; + int_fast32_t exp; + uint_fast64_t sig64, sig0; + int_fast32_t shiftDist; + int_fast8_t negShiftDist; + uint_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA64 = uA.ui.v64; + uiA0 = uA.ui.v0; + sign = signF128UI64( uiA64 ); + exp = expF128UI64( uiA64 ); + sig64 = fracF128UI64( uiA64 ); + sig0 = uiA0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x402F - exp; + if ( shiftDist < 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( sign || (shiftDist < -15) ) goto invalid; + sig64 |= UINT64_C( 0x0001000000000000 ); + negShiftDist = -shiftDist; + z = sig64<>(shiftDist & 63); + if ( exact && (uint64_t) (sig0<>shiftDist; + if ( exact && (sig0 || (z< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t f16_add( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float16_t (*magsFuncPtr)( uint_fast16_t, uint_fast16_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF16UI( uiA ^ uiB ) ) { + return softfloat_subMagsF16( uiA, uiB ); + } else { + return softfloat_addMagsF16( uiA, uiB ); + } +#else + magsFuncPtr = + signF16UI( uiA ^ uiB ) ? 
softfloat_subMagsF16 : softfloat_addMagsF16; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_classify.c b/vendor/riscv-isa-sim/softfloat/f16_classify.c new file mode 100755 index 00000000..9402ff13 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f16_classify( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF16UI( uiA ) == 0x1F; + uint_fast16_t subnormalOrZero = expF16UI( uiA ) == 0; + bool sign = signF16UI( uiA ); + bool fracZero = fracF16UI( uiA ) == 0; + bool isNaN = isNaNF16UI( uiA ); + bool isSNaN = softfloat_isSigNaNF16UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_div.c b/vendor/riscv-isa-sim/softfloat/f16_div.c new file mode 100644 index 00000000..71b5c29b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_div.c @@ -0,0 +1,186 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
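f16_classify above appears to be a local addition to the SoftFloat sources (it carries no Release-3d license header); its return value is a one-hot mask whose bit positions follow the table in the code, from bit 0 for negative infinity up through bit 9 for a quiet NaN, which lines up with the RISC-V FCLASS result layout. A small sketch, assuming f16_classify and ui32_to_f16 are declared in this copy's softfloat.h:

#include <stdio.h>
#include "softfloat.h"

int main(void)
{
    /* +0.0 in half precision should set only the "positive zero" bit,
       which f16_classify places at bit position 4. */
    float16_t zero = ui32_to_f16( 0 );
    uint_fast16_t cls = f16_classify( zero );
    printf( "class mask = 0x%03x, positive-zero bit = %d\n",
            (unsigned) cls, (int) ((cls >> 4) & 1) );
    return 0;
}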
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +extern const uint16_t softfloat_approxRecip_1k0s[]; +extern const uint16_t softfloat_approxRecip_1k1s[]; + +float16_t f16_div( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signB; + int_fast8_t expB; + uint_fast16_t sigB; + bool signZ; + struct exp8_sig16 normExpSig; + int_fast8_t expZ; +#ifdef SOFTFLOAT_FAST_DIV32TO16 + uint_fast32_t sig32A; + uint_fast16_t sigZ; +#else + int index; + uint16_t r0; + uint_fast16_t sigZ, rem; +#endif + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF16UI( uiB ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0xE; + sigA |= 0x0400; + sigB |= 0x0400; +#ifdef SOFTFLOAT_FAST_DIV32TO16 + if ( sigA < sigB ) { + --expZ; + sig32A = (uint_fast32_t) sigA<<15; + } else { + sig32A = (uint_fast32_t) sigA<<14; + } + sigZ = sig32A / sigB; + if ( ! (sigZ & 7) ) sigZ |= ((uint_fast32_t) sigB * sigZ != sig32A); +#else + if ( sigA < sigB ) { + --expZ; + sigA <<= 5; + } else { + sigA <<= 4; + } + index = sigB>>6 & 0xF; + r0 = softfloat_approxRecip_1k0s[index] + - (((uint_fast32_t) softfloat_approxRecip_1k1s[index] + * (sigB & 0x3F)) + >>10); + sigZ = ((uint_fast32_t) sigA * r0)>>16; + rem = (sigA<<10) - sigZ * sigB; + sigZ += (rem * (uint_fast32_t) r0)>>26; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + ++sigZ; + if ( ! 
(sigZ & 7) ) { + sigZ &= ~1; + rem = (sigA<<10) - sigZ * sigB; + if ( rem & 0x8000 ) { + sigZ -= 2; + } else { + if ( rem ) sigZ |= 1; + } + } +#endif + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF16UI( signZ, 0x1F, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF16UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_eq.c b/vendor/riscv-isa-sim/softfloat/f16_eq.c new file mode 100644 index 00000000..37a60998 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_eq( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + if ( + softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! (uint16_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c new file mode 100644 index 00000000..894f7b59 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f16_eq_signaling( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! 
(uint16_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c new file mode 100644 index 00000000..657805be --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_isSignalingNaN( float16_t a ) +{ + union ui16_f16 uA; + + uA.f = a; + return softfloat_isSigNaNF16UI( uA.ui ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_le.c b/vendor/riscv-isa-sim/softfloat/f16_le.c new file mode 100644 index 00000000..37eaf187 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_le.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f16_le( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? signA || ! (uint16_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f16_le_quiet.c new file mode 100644 index 00000000..8391db74 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_le_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_le_quiet( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + if ( + softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? signA || ! (uint16_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_lt.c b/vendor/riscv-isa-sim/softfloat/f16_lt.c new file mode 100644 index 00000000..3d3522a4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_lt.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f16_lt( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? signA && ((uint16_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c new file mode 100644 index 00000000..37f762cd --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_lt_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f16_lt_quiet( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF16UI( uiA ) || isNaNF16UI( uiB ) ) { + if ( + softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF16UI( uiA ); + signB = signF16UI( uiB ); + return + (signA != signB) ? 
signA && ((uint16_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_mul.c b/vendor/riscv-isa-sim/softfloat/f16_mul.c new file mode 100644 index 00000000..255caa7e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_mul.c @@ -0,0 +1,140 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f16_mul( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + union ui16_f16 uB; + uint_fast16_t uiB; + bool signB; + int_fast8_t expB; + uint_fast16_t sigB; + bool signZ; + uint_fast16_t magBits; + struct exp8_sig16 normExpSig; + int_fast8_t expZ; + uint_fast32_t sig32Z; + uint_fast16_t sigZ, uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF16UI( uiB ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA || ((expB == 0x1F) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0xF; + sigA = (sigA | 0x0400)<<4; + sigB = (sigB | 0x0400)<<5; + sig32Z = (uint_fast32_t) sigA * sigB; + sigZ = sig32Z>>16; + if ( sig32Z & 0xFFFF ) sigZ |= 1; + if ( sigZ < 0x4000 ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + } else { + uiZ = packToF16UI( signZ, 0x1F, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF16UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f16_mulAdd.c new file mode 100644 index 00000000..40261963 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t f16_mulAdd( float16_t a, float16_t b, float16_t c ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; + union ui16_f16 uC; + uint_fast16_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF16( uiA, uiB, uiC, 0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_rem.c b/vendor/riscv-isa-sim/softfloat/f16_rem.c new file mode 100644 index 00000000..86c319dd --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_rem.c @@ -0,0 +1,171 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f16_rem( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + union ui16_f16 uB; + uint_fast16_t uiB; + int_fast8_t expB; + uint_fast16_t sigB; + struct exp8_sig16 normExpSig; + uint16_t rem; + int_fast8_t expDiff; + uint_fast16_t q; + uint32_t recip32, q32; + uint16_t altRem, meanRem; + bool signRem; + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA || ((expB == 0x1F) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | 0x0400; + sigB |= 0x0400; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 3; + if ( expDiff ) { + rem <<= 2; + q = 0; + } else { + rem <<= 3; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( (uint_fast32_t) sigB<<21 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 4; + expDiff -= 31; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | which is believed to be the maximum possible. 
+ *--------------------------------------------------------------------*/ + sigB <<= 3; + for (;;) { + q32 = (rem * (uint_fast64_t) recip32)>>16; + if ( expDiff < 0 ) break; + rem = -((uint_fast16_t) q32 * sigB); + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -30 here.) + *--------------------------------------------------------------------*/ + q32 >>= ~expDiff & 31; + q = q32; + rem = (rem<<(expDiff + 30)) - q * sigB; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & 0x8000) ); + meanRem = rem + altRem; + if ( (meanRem & 0x8000) || (! meanRem && (q & 1)) ) rem = altRem; + signRem = signA; + if ( 0x8000 <= rem ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF16( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f16_roundToInt.c new file mode 100644 index 00000000..9bbd47eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_roundToInt.c @@ -0,0 +1,112 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f16_roundToInt( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + int_fast8_t exp; + uint_fast16_t uiZ, lastBitMask, roundBitsMask; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0xE ) { + if ( ! (uint16_t) (uiA<<1) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF16UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF16UI( uiA ) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0xE ) uiZ |= packToF16UI( 0, 0xF, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF16UI( 1, 0xF, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF16UI( 0, 0xF, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x19 <= exp ) { + if ( (exp == 0x1F) && fracF16UI( uiA ) ) { + uiZ = softfloat_propagateNaNF16UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast16_t) 1<<(0x19 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF16UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_sqrt.c b/vendor/riscv-isa-sim/softfloat/f16_sqrt.c new file mode 100644 index 00000000..7ff29239 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_sqrt.c @@ -0,0 +1,136 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[]; + +float16_t f16_sqrt( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool signA; + int_fast8_t expA; + uint_fast16_t sigA, uiZ; + struct exp8_sig16 normExpSig; + int_fast8_t expZ; + int index; + uint_fast16_t r0; + uint_fast32_t ESqrR0; + uint16_t sigma0; + uint_fast16_t recipSqrt16, sigZ, shiftedSigZ; + uint16_t negRem; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF16UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = ((expA - 0xF)>>1) + 0xE; + expA &= 1; + sigA |= 0x0400; + index = (sigA>>6 & 0xE) + expA; + r0 = softfloat_approxRecipSqrt_1k0s[index] + - (((uint_fast32_t) softfloat_approxRecipSqrt_1k1s[index] + * (sigA & 0x7F)) + >>11); + ESqrR0 = ((uint_fast32_t) r0 * r0)>>1; + if ( expA ) ESqrR0 >>= 1; + sigma0 = ~(uint_fast16_t) ((ESqrR0 * sigA)>>16); + recipSqrt16 = r0 + (((uint_fast32_t) r0 * sigma0)>>25); + if ( ! 
(recipSqrt16 & 0x8000) ) recipSqrt16 = 0x8000; + sigZ = ((uint_fast32_t) (sigA<<5) * recipSqrt16)>>16; + if ( expA ) sigZ >>= 1; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + ++sigZ; + if ( ! (sigZ & 7) ) { + shiftedSigZ = sigZ>>1; + negRem = shiftedSigZ * shiftedSigZ; + sigZ &= ~1; + if ( negRem & 0x8000 ) { + sigZ |= 1; + } else { + if ( negRem ) --sigZ; + } + } + return softfloat_roundPackToF16( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_sub.c b/vendor/riscv-isa-sim/softfloat/f16_sub.c new file mode 100644 index 00000000..811f239f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_sub.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t f16_sub( float16_t a, float16_t b ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + union ui16_f16 uB; + uint_fast16_t uiB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float16_t (*magsFuncPtr)( uint_fast16_t, uint_fast16_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF16UI( uiA ^ uiB ) ) { + return softfloat_addMagsF16( uiA, uiB ); + } else { + return softfloat_subMagsF16( uiA, uiB ); + } +#else + magsFuncPtr = + signF16UI( uiA ^ uiB ) ? 
softfloat_addMagsF16 : softfloat_subMagsF16; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_f128.c b/vendor/riscv-isa-sim/softfloat/f16_to_f128.c new file mode 100644 index 00000000..961cdaaf --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_f128.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f16_to_f128( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + struct commonNaN commonNaN; + struct uint128 uiZ; + struct exp8_sig16 normExpSig; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + if ( frac ) { + softfloat_f16UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF128UI( &commonNaN ); + } else { + uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ.v0 = 0; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! 
frac ) { + uiZ.v64 = packToF128UI64( sign, 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + normExpSig = softfloat_normSubnormalF16Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ.v64 = packToF128UI64( sign, exp + 0x3FF0, (uint_fast64_t) frac<<38 ); + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_f32.c b/vendor/riscv-isa-sim/softfloat/f16_to_f32.c new file mode 100644 index 00000000..fb8b3819 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_f32.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f16_to_f32( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + struct commonNaN commonNaN; + uint_fast32_t uiZ; + struct exp8_sig16 normExpSig; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + if ( frac ) { + softfloat_f16UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF16Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF32UI( sign, exp + 0x70, (uint_fast32_t) frac<<13 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_f64.c b/vendor/riscv-isa-sim/softfloat/f16_to_f64.c new file mode 100644 index 00000000..4ab27ba0 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_f64.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f16_to_f64( float16_t a ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct exp8_sig16 normExpSig; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + if ( frac ) { + softfloat_f16UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF16Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF64UI( sign, exp + 0x3F0, (uint_fast64_t) frac<<42 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i16.c b/vendor/riscv-isa-sim/softfloat/f16_to_i16.c new file mode 100644 index 00000000..b0fbb7cc --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_i16.c @@ -0,0 +1,57 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast16_t f16_to_i16( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f16_to_i32(a, roundingMode, exact); + + if (sig32 > INT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromPosOverflow; + } else if (sig32 < INT16_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromNegOverflow; + } else { + return sig32; + } +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i32.c b/vendor/riscv-isa-sim/softfloat/f16_to_i32.c new file mode 100644 index 00000000..24b19846 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_i32.c @@ -0,0 +1,87 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f16_to_i32( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + int_fast32_t sig32; + int_fast8_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + frac ? i32_fromNaN + : sign ? i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig32 = frac; + if ( exp ) { + sig32 |= 0x0400; + shiftDist = exp - 0x19; + if ( 0 <= shiftDist ) { + sig32 <<= shiftDist; + return sign ? -sig32 : sig32; + } + shiftDist = exp - 0x0D; + if ( 0 < shiftDist ) sig32 <<= shiftDist; + } + return + softfloat_roundToI32( + sign, (uint_fast32_t) sig32, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c new file mode 100644 index 00000000..ebb4965c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_i32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast32_t f16_to_i32_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    int_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? i32_fromNaN
+                : sign ? i32_fromNegOverflow : i32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (int_fast32_t) (frac | 0x0400)<<shiftDist;
+    alignedSig >>= 10;
+    return sign ? -alignedSig : alignedSig;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i64.c b/vendor/riscv-isa-sim/softfloat/f16_to_i64.c
new file mode 100644
index 00000000..c2417456
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i64.c
@@ -0,0 +1,87 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f16_to_i64( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + int_fast32_t sig32; + int_fast8_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + frac ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig32 = frac; + if ( exp ) { + sig32 |= 0x0400; + shiftDist = exp - 0x19; + if ( 0 <= shiftDist ) { + sig32 <<= shiftDist; + return sign ? -sig32 : sig32; + } + shiftDist = exp - 0x0D; + if ( 0 < shiftDist ) sig32 <<= shiftDist; + } + return + softfloat_roundToI32( + sign, (uint_fast32_t) sig32, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c new file mode 100644 index 00000000..dc9a8d37 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_i64_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+int_fast64_t f16_to_i64_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    int_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? i64_fromNaN
+                : sign ? i64_fromNegOverflow : i64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (int_fast32_t) (frac | 0x0400)<<shiftDist;
+    alignedSig >>= 10;
+    return sign ? -alignedSig : alignedSig;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_i8.c b/vendor/riscv-isa-sim/softfloat/f16_to_i8.c
new file mode 100644
index 00000000..23638cc1
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_i8.c
@@ -0,0 +1,57 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast8_t f16_to_i8( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f16_to_i32(a, roundingMode, exact); + + if (sig32 > INT8_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i8_fromPosOverflow; + } else if (sig32 < INT8_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i8_fromNegOverflow; + } else { + return sig32; + } +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui16.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui16.c new file mode 100644 index 00000000..81c4f8d9 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui16.c @@ -0,0 +1,54 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f16_to_ui16( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + uint_fast32_t sig32 = f16_to_ui32(a, roundingMode, exact); + + if (sig32 > UINT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return ui16_fromPosOverflow; + } else { + return sig32; + } +} + diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui32.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui32.c new file mode 100644 index 00000000..c99af39c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f16_to_ui32( float16_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui16_f16 uA; + uint_fast16_t uiA; + bool sign; + int_fast8_t exp; + uint_fast16_t frac; + uint_fast32_t sig32; + int_fast8_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF16UI( uiA ); + exp = expF16UI( uiA ); + frac = fracF16UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x1F ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + frac ? ui32_fromNaN + : sign ? 
ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = frac;
+    if ( exp ) {
+        sig32 |= 0x0400;
+        shiftDist = exp - 0x19;
+        if ( (0 <= shiftDist) && ! sign ) {
+            return sig32<<shiftDist;
+        }
+        shiftDist = exp - 0x0D;
+        if ( 0 < shiftDist ) sig32 <<= shiftDist;
+    }
+    return softfloat_roundToUI32( sign, sig32, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui32_r_minMag.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast32_t f16_to_ui32_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    uint_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( sign || (exp == 0x1F) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? ui32_fromNaN
+                : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (uint_fast32_t) (frac | 0x0400)<<shiftDist;
+    return alignedSig>>10;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui64.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui64.c
new file mode 100644
index 00000000..dd260eae
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui64.c
@@ -0,0 +1,84 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f16_to_ui64( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    bool sign;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    uint_fast32_t sig32;
+    int_fast8_t shiftDist;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    sign = signF16UI( uiA );
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    if ( exp == 0x1F ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            frac ? ui64_fromNaN
+                : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sig32 = frac;
+    if ( exp ) {
+        sig32 |= 0x0400;
+        shiftDist = exp - 0x19;
+        if ( (0 <= shiftDist) && ! sign ) {
+            return sig32<<shiftDist;
+        }
+        shiftDist = exp - 0x0D;
+        if ( 0 < shiftDist ) sig32 <<= shiftDist;
+    }
+    return softfloat_roundToUI32( sign, sig32, roundingMode, exact );
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c
new file mode 100644
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui64_r_minMag.c
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+#include "internals.h"
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast64_t f16_to_ui64_r_minMag( float16_t a, bool exact )
+{
+    union ui16_f16 uA;
+    uint_fast16_t uiA;
+    int_fast8_t exp;
+    uint_fast16_t frac;
+    int_fast8_t shiftDist;
+    bool sign;
+    uint_fast32_t alignedSig;
+
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    uA.f = a;
+    uiA = uA.ui;
+    exp = expF16UI( uiA );
+    frac = fracF16UI( uiA );
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    shiftDist = exp - 0x0F;
+    if ( shiftDist < 0 ) {
+        if ( exact && (exp | frac) ) {
+            softfloat_exceptionFlags |= softfloat_flag_inexact;
+        }
+        return 0;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    sign = signF16UI( uiA );
+    if ( sign || (exp == 0x1F) ) {
+        softfloat_raiseFlags( softfloat_flag_invalid );
+        return
+            (exp == 0x1F) && frac ? ui64_fromNaN
+                : sign ?
ui64_fromNegOverflow : ui64_fromPosOverflow;
+    }
+    /*------------------------------------------------------------------------
+    *------------------------------------------------------------------------*/
+    alignedSig = (uint_fast32_t) (frac | 0x0400)<<shiftDist;
+    return alignedSig>>10;
+
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f16_to_ui8.c b/vendor/riscv-isa-sim/softfloat/f16_to_ui8.c
new file mode 100644
index 00000000..96124e12
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f16_to_ui8.c
@@ -0,0 +1,54 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the
+University of California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "specialize.h"
+#include "softfloat.h"
+
+uint_fast8_t f16_to_ui8( float16_t a, uint_fast8_t roundingMode, bool exact )
+{
+    uint_fast8_t old_flags = softfloat_exceptionFlags;
+
+    uint_fast32_t sig32 = f16_to_ui32(a, roundingMode, exact);
+
+    if (sig32 > UINT8_MAX) {
+        softfloat_exceptionFlags = old_flags | softfloat_flag_invalid;
+        return ui8_fromPosOverflow;
+    } else {
+        return sig32;
+    }
+}
+
diff --git a/vendor/riscv-isa-sim/softfloat/f32_add.c b/vendor/riscv-isa-sim/softfloat/f32_add.c
new file mode 100644
index 00000000..4a51eccf
--- /dev/null
+++ b/vendor/riscv-isa-sim/softfloat/f32_add.c
@@ -0,0 +1,70 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1.
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_add( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float32_t (*magsFuncPtr)( uint_fast32_t, uint_fast32_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF32UI( uiA ^ uiB ) ) { + return softfloat_subMagsF32( uiA, uiB ); + } else { + return softfloat_addMagsF32( uiA, uiB ); + } +#else + magsFuncPtr = + signF32UI( uiA ^ uiB ) ? 
softfloat_subMagsF32 : softfloat_addMagsF32; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_classify.c b/vendor/riscv-isa-sim/softfloat/f32_classify.c new file mode 100755 index 00000000..83fad878 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f32_classify( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF32UI( uiA ) == 0xFF; + uint_fast16_t subnormalOrZero = expF32UI( uiA ) == 0; + bool sign = signF32UI( uiA ); + bool fracZero = fracF32UI( uiA ) == 0; + bool isNaN = isNaNF32UI( uiA ); + bool isSNaN = softfloat_isSigNaNF32UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_div.c b/vendor/riscv-isa-sim/softfloat/f32_div.c new file mode 100644 index 00000000..9d101254 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_div.c @@ -0,0 +1,180 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_div( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; +#ifdef SOFTFLOAT_FAST_DIV64TO32 + uint_fast64_t sig64A; + uint_fast32_t sigZ; +#else + uint_fast32_t sigZ; + uint_fast64_t rem; +#endif + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x7E; + sigA |= 0x00800000; + sigB |= 0x00800000; +#ifdef SOFTFLOAT_FAST_DIV64TO32 + if ( sigA < sigB ) { + --expZ; + sig64A = (uint_fast64_t) sigA<<31; + } else { + sig64A = (uint_fast64_t) sigA<<30; + } + sigZ = sig64A / sigB; + if ( ! 
(sigZ & 0x3F) ) sigZ |= ((uint_fast64_t) sigB * sigZ != sig64A); +#else + if ( sigA < sigB ) { + --expZ; + sigA <<= 8; + } else { + sigA <<= 7; + } + sigB <<= 8; + sigZ = ((uint_fast64_t) sigA * softfloat_approxRecip32_1( sigB ))>>32; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZ += 2; + if ( (sigZ & 0x3F) < 2 ) { + sigZ &= ~3; +#ifdef SOFTFLOAT_FAST_INT64 + rem = ((uint_fast64_t) sigA<<31) - (uint_fast64_t) sigZ * sigB; +#else + rem = ((uint_fast64_t) sigA<<32) - (uint_fast64_t) (sigZ<<1) * sigB; +#endif + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + sigZ -= 4; + } else { + if ( rem ) sigZ |= 1; + } + } +#endif + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF32UI( signZ, 0xFF, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_eq.c b/vendor/riscv-isa-sim/softfloat/f32_eq.c new file mode 100644 index 00000000..5f07eee3 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_eq( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c new file mode 100644 index 00000000..f5fcc824 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_eq_signaling( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! (uint32_t) ((uiA | uiB)<<1); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c new file mode 100644 index 00000000..5004a5aa --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_isSignalingNaN( float32_t a ) +{ + union ui32_f32 uA; + + uA.f = a; + return softfloat_isSigNaNF32UI( uA.ui ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_le.c b/vendor/riscv-isa-sim/softfloat/f32_le.c new file mode 100644 index 00000000..77595fbb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_le.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_le( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f32_le_quiet.c new file mode 100644 index 00000000..1ec91010 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_le_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_le_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_lt.c b/vendor/riscv-isa-sim/softfloat/f32_lt.c new file mode 100644 index 00000000..9e12843f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_lt.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f32_lt( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c new file mode 100644 index 00000000..9f83b810 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_lt_quiet.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f32_lt_quiet( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( + softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF32UI( uiA ); + signB = signF32UI( uiB ); + return + (signA != signB) ? 
signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_mul.c b/vendor/riscv-isa-sim/softfloat/f32_mul.c new file mode 100644 index 00000000..a2a673f1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_mul.c @@ -0,0 +1,137 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_mul( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signZ; + uint_fast32_t magBits; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x7F; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<8; + sigZ = softfloat_shortShiftRightJam64( (uint_fast64_t) sigA * sigB, 32 ); + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + } else { + uiZ = packToF32UI( signZ, 0xFF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF32UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f32_mulAdd.c new file mode 100644 index 00000000..e98021b7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; + union ui32_f32 uC; + uint_fast32_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF32( uiA, uiB, uiC, 0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_rem.c b/vendor/riscv-isa-sim/softfloat/f32_rem.c new file mode 100644 index 00000000..771b1b94 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_rem.c @@ -0,0 +1,168 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_rem( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + union ui32_f32 uB; + uint_fast32_t uiB; + int_fast16_t expB; + uint_fast32_t sigB; + struct exp16_sig32 normExpSig; + uint32_t rem; + int_fast16_t expDiff; + uint32_t q, recip32, altRem, meanRem; + bool signRem; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | 0x00800000; + sigB |= 0x00800000; + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 6; + if ( expDiff ) { + rem <<= 5; + q = 0; + } else { + rem <<= 6; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB<<8 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 7; + expDiff -= 31; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | which is believed to be the maximum possible. 
+ *--------------------------------------------------------------------*/ + sigB <<= 6; + for (;;) { + q = (rem * (uint_fast64_t) recip32)>>32; + if ( expDiff < 0 ) break; + rem = -(q * (uint32_t) sigB); + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -30 here.) + *--------------------------------------------------------------------*/ + q >>= ~expDiff & 31; + rem = (rem<<(expDiff + 30)) - q * (uint32_t) sigB; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & 0x80000000) ); + meanRem = rem + altRem; + if ( (meanRem & 0x80000000) || (! meanRem && (q & 1)) ) rem = altRem; + signRem = signA; + if ( 0x80000000 <= rem ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF32( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f32_roundToInt.c new file mode 100644 index 00000000..0861b840 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_roundToInt.c @@ -0,0 +1,112 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_roundToInt( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t uiZ, lastBitMask, roundBitsMask; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x7E ) { + if ( ! (uint32_t) (uiA<<1) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF32UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF32UI( uiA ) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0x7E ) uiZ |= packToF32UI( 0, 0x7F, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF32UI( 1, 0x7F, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF32UI( 0, 0x7F, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x96 <= exp ) { + if ( (exp == 0xFF) && fracF32UI( uiA ) ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast32_t) 1<<(0x96 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! (uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF32UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_sqrt.c b/vendor/riscv-isa-sim/softfloat/f32_sqrt.c new file mode 100644 index 00000000..5ef659e4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_sqrt.c @@ -0,0 +1,121 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f32_sqrt( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool signA; + int_fast16_t expA; + uint_fast32_t sigA, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expZ; + uint_fast32_t sigZ, shiftedSigZ; + uint32_t negRem; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF32UI( uiA, 0 ); + goto uiZ; + } + if ( ! signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( !
sigA ) return a; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x7F)>>1) + 0x7E; + expA &= 1; + sigA = (sigA | 0x00800000)<<8; + sigZ = + ((uint_fast64_t) sigA * softfloat_approxRecipSqrt32_1( expA, sigA )) + >>32; + if ( expA ) sigZ >>= 1; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sigZ += 2; + if ( (sigZ & 0x3F) < 2 ) { + shiftedSigZ = sigZ>>2; + negRem = shiftedSigZ * shiftedSigZ; + sigZ &= ~3; + if ( negRem & 0x80000000 ) { + sigZ |= 1; + } else { + if ( negRem ) --sigZ; + } + } + return softfloat_roundPackToF32( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_sub.c b/vendor/riscv-isa-sim/softfloat/f32_sub.c new file mode 100644 index 00000000..d8307381 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_sub.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t f32_sub( float32_t a, float32_t b ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + union ui32_f32 uB; + uint_fast32_t uiB; +#if ! 
defined INLINE_LEVEL || (INLINE_LEVEL < 1) + float32_t (*magsFuncPtr)( uint_fast32_t, uint_fast32_t ); +#endif + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) + if ( signF32UI( uiA ^ uiB ) ) { + return softfloat_addMagsF32( uiA, uiB ); + } else { + return softfloat_subMagsF32( uiA, uiB ); + } +#else + magsFuncPtr = + signF32UI( uiA ^ uiB ) ? softfloat_addMagsF32 : softfloat_subMagsF32; + return (*magsFuncPtr)( uiA, uiB ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_f128.c b/vendor/riscv-isa-sim/softfloat/f32_to_f128.c new file mode 100644 index 00000000..bf519264 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_f128.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f32_to_f128( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + struct uint128 uiZ; + struct exp16_sig32 normExpSig; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF128UI( &commonNaN ); + } else { + uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ.v0 = 0; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ.v64 = packToF128UI64( sign, 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + normExpSig = softfloat_normSubnormalF32Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ.v64 = packToF128UI64( sign, exp + 0x3F80, (uint_fast64_t) frac<<25 ); + uiZ.v0 = 0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_f16.c b/vendor/riscv-isa-sim/softfloat/f32_to_f16.c new file mode 100644 index 00000000..7a971589 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_f16.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f32_to_f16( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = frac>>9 | ((frac & 0x1FF) != 0); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF16( sign, exp - 0x71, frac16 | 0x4000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_f64.c b/vendor/riscv-isa-sim/softfloat/f32_to_f64.c new file mode 100644 index 00000000..f9e02f22 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_f64.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f32_to_f64( float32_t a ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t frac; + struct commonNaN commonNaN; + uint_fast64_t uiZ; + struct exp16_sig32 normExpSig; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + frac = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0xFF ) { + if ( frac ) { + softfloat_f32UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF64UI( &commonNaN ); + } else { + uiZ = packToF64UI( sign, 0x7FF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ = packToF64UI( sign, 0, 0 ); + goto uiZ; + } + normExpSig = softfloat_normSubnormalF32Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = packToF64UI( sign, exp + 0x380, (uint_fast64_t) frac<<29 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i16.c b/vendor/riscv-isa-sim/softfloat/f32_to_i16.c new file mode 100644 index 00000000..bde4c76c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i16.c @@ -0,0 +1,57 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +int_fast16_t f32_to_i16( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + int_fast32_t sig32 = f32_to_i32(a, roundingMode, exact); + + if (sig32 > INT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromPosOverflow; + } else if (sig32 < INT16_MIN) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return i16_fromNegOverflow; + } else { + return sig32; + } +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i32.c b/vendor/riscv-isa-sim/softfloat/f32_to_i32.c new file mode 100644 index 00000000..c9f2cf9b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f32_to_i32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c new file mode 100644 index 00000000..1a94dcc6 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i32_r_minMag.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f32_to_i32_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + int_fast32_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x9E - exp; + if ( 32 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( shiftDist <= 0 ) { + if ( uiA == packToF32UI( 1, 0x9E, 0 ) ) return -0x7FFFFFFF - 1; + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i32_fromNaN + : sign ? i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig | 0x00800000)<<8; + absZ = sig>>shiftDist; + if ( exact && ((uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f32_to_i64( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; +#ifdef SOFTFLOAT_FAST_INT64 + uint_fast64_t sig64, extra; + struct uint64_extra sig64Extra; +#else + uint32_t extSig[3]; +#endif + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( shiftDist < 0 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i64_fromNaN + : sign ? 
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; +#ifdef SOFTFLOAT_FAST_INT64 + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToI64( sign, sig64, extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 2 )] = sig<<8; + extSig[indexWord( 3, 1 )] = 0; + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist ) softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + return softfloat_roundMToI64( sign, extSig, roundingMode, exact ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c new file mode 100644 index 00000000..7d336a47 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_i64_r_minMag.c @@ -0,0 +1,94 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f32_to_i64_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t sig64; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( 64 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( shiftDist <= 0 ) { + if ( uiA == packToF32UI( 1, 0xBE, 0 ) ) { + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + absZ = sig64>>shiftDist; + shiftDist = 40 - shiftDist; + if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sign ? -absZ : absZ; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui16.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui16.c new file mode 100644 index 00000000..073492bf --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui16.c @@ -0,0 +1,53 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f32_to_ui16( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + uint_fast8_t old_flags = softfloat_exceptionFlags; + + uint_fast32_t sig32 = f32_to_ui32(a, roundingMode, exact); + + if (sig32 > UINT16_MAX) { + softfloat_exceptionFlags = old_flags | softfloat_flag_invalid; + return ui16_fromPosOverflow; + } else { + return sig32; + } +} diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui32.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui32.c new file mode 100644 index 00000000..5ec279ba --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
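f32_to_ui16 above narrows by delegating to the wider f32_to_ui32 and then saturating the result. The same pattern in isolation looks like the sketch below; the flag variable and function names here are illustrative stand-ins, not part of the patch, with the saturation value playing the role of the *_fromPosOverflow constants.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool demo_overflow;              /* stands in for the invalid/overflow flag */

    static uint16_t narrow_u32_to_u16(uint32_t wide)
    {
        if (wide > UINT16_MAX) {            /* value does not fit the narrow type */
            demo_overflow = true;
            return UINT16_MAX;              /* saturate on overflow */
        }
        return (uint16_t) wide;
    }

    int main(void)
    {
        printf("%u %u %d\n", (unsigned) narrow_u32_to_u16(1234),
               (unsigned) narrow_u32_to_u16(70000), demo_overflow);
        /* expected: 1234 65535 1 */
        return 0;
    }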
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f32_to_ui32( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + uint_fast64_t sig64; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0xFF) && sig ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<32; + shiftDist = 0xAA - exp; + if ( 0 < shiftDist ) sig64 = softfloat_shiftRightJam64( sig64, shiftDist ); + return softfloat_roundToUI32( sign, sig64, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c new file mode 100644 index 00000000..12f72619 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f32_to_ui32_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x9E - exp; + if ( 32 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( sign || (shiftDist < 0) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui32_fromNaN + : sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig | 0x00800000)<<8; + z = sig>>shiftDist; + if ( exact && (z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f32_to_ui64( float32_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + bool sign; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; +#ifdef SOFTFLOAT_FAST_INT64 + uint_fast64_t sig64, extra; + struct uint64_extra sig64Extra; +#else + uint32_t extSig[3]; +#endif + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF32UI( uiA ); + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( shiftDist < 0 ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui64_fromNaN + : sign ? 
ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= 0x00800000; +#ifdef SOFTFLOAT_FAST_INT64 + sig64 = (uint_fast64_t) sig<<40; + extra = 0; + if ( shiftDist ) { + sig64Extra = softfloat_shiftRightJam64Extra( sig64, 0, shiftDist ); + sig64 = sig64Extra.v; + extra = sig64Extra.extra; + } + return softfloat_roundToUI64( sign, sig64, extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 2 )] = sig<<8; + extSig[indexWord( 3, 1 )] = 0; + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist ) softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + return softfloat_roundMToUI64( sign, extSig, roundingMode, exact ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c new file mode 100644 index 00000000..f96f3e1f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f32_to_ui64_r_minMag.c @@ -0,0 +1,90 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f32_to_ui64_r_minMag( float32_t a, bool exact ) +{ + union ui32_f32 uA; + uint_fast32_t uiA; + int_fast16_t exp; + uint_fast32_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t sig64, z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF32UI( uiA ); + sig = fracF32UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0xBE - exp; + if ( 64 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF32UI( uiA ); + if ( sign || (shiftDist < 0) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0xFF) && sig ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= 0x00800000; + sig64 = (uint_fast64_t) sig<<40; + z = sig64>>shiftDist; + shiftDist = 40 - shiftDist; + if ( exact && (shiftDist < 0) && (uint32_t) (sig<<(shiftDist & 31)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_add.c b/vendor/riscv-isa-sim/softfloat/f64_add.c new file mode 100644 index 00000000..e9880ddf --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_add.c @@ -0,0 +1,74 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
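The constant 0xBE in the 64-bit conversions above (f32_to_ui64 and f32_to_ui64_r_minMag) is the single-precision bias (127) plus 63, so shiftDist = 0xBE - exp measures how far a significand parked at bit 63 must move right to land at its integer weight. A plain-C check of that arithmetic for one in-range positive value, not SoftFloat code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        float f = 123456.75f;
        uint32_t bits;
        memcpy(&bits, &f, sizeof bits);                     /* reinterpret the IEEE-754 encoding */
        uint32_t exp = (bits >> 23) & 0xFF;
        uint32_t sig = (bits & 0x007FFFFF) | 0x00800000;    /* restore the implicit leading 1 */
        int shiftDist = 0xBE - (int) exp;                   /* 0xBE = 127 + 63 */
        uint64_t z = ((uint64_t) sig << 40) >> shiftDist;   /* truncate toward zero */
        printf("%llu vs %llu\n", (unsigned long long) z,
               (unsigned long long) (uint64_t) f);          /* both print 123456 */
        return 0;
    }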
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_add( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2) + float64_t (*magsFuncPtr)( uint_fast64_t, uint_fast64_t, bool ); +#endif + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) + if ( signA == signB ) { + return softfloat_addMagsF64( uiA, uiB, signA ); + } else { + return softfloat_subMagsF64( uiA, uiB, signA ); + } +#else + magsFuncPtr = + (signA == signB) ? softfloat_addMagsF64 : softfloat_subMagsF64; + return (*magsFuncPtr)( uiA, uiB, signA ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_classify.c b/vendor/riscv-isa-sim/softfloat/f64_classify.c new file mode 100755 index 00000000..180abde3 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_classify.c @@ -0,0 +1,36 @@ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast16_t f64_classify( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + + uA.f = a; + uiA = uA.ui; + + uint_fast16_t infOrNaN = expF64UI( uiA ) == 0x7FF; + uint_fast16_t subnormalOrZero = expF64UI( uiA ) == 0; + bool sign = signF64UI( uiA ); + bool fracZero = fracF64UI( uiA ) == 0; + bool isNaN = isNaNF64UI( uiA ); + bool isSNaN = softfloat_isSigNaNF64UI( uiA ); + + return + ( sign && infOrNaN && fracZero ) << 0 | + ( sign && !infOrNaN && !subnormalOrZero ) << 1 | + ( sign && subnormalOrZero && !fracZero ) << 2 | + ( sign && subnormalOrZero && fracZero ) << 3 | + ( !sign && infOrNaN && fracZero ) << 7 | + ( !sign && !infOrNaN && !subnormalOrZero ) << 6 | + ( !sign && subnormalOrZero && !fracZero ) << 5 | + ( !sign && subnormalOrZero && fracZero ) << 4 | + ( isNaN && isSNaN ) << 8 | + ( isNaN && !isSNaN ) << 9; +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_div.c b/vendor/riscv-isa-sim/softfloat/f64_div.c new file mode 100644 index 00000000..c5a2d4fe --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_div.c @@ -0,0 +1,172 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_div( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t recip32, sig32Z, doubleTerm; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto invalid; + } + goto infinity; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + goto zero; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) { + if ( ! (expA | sigA) ) goto invalid; + softfloat_raiseFlags( softfloat_flag_infinite ); + goto infinity; + } + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA - expB + 0x3FE; + sigA |= UINT64_C( 0x0010000000000000 ); + sigB |= UINT64_C( 0x0010000000000000 ); + if ( sigA < sigB ) { + --expZ; + sigA <<= 11; + } else { + sigA <<= 10; + } + sigB <<= 11; + recip32 = softfloat_approxRecip32_1( sigB>>32 ) - 2; + sig32Z = ((uint32_t) (sigA>>32) * (uint_fast64_t) recip32)>>32; + doubleTerm = sig32Z<<1; + rem = + ((sigA - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28) + - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4); + q = (((uint32_t) (rem>>32) * (uint_fast64_t) recip32)>>32) + 4; + sigZ = ((uint_fast64_t) sig32Z<<32) + ((uint_fast64_t) q<<4); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 4<<4 ) { + q &= ~7; + sigZ &= ~(uint_fast64_t) 0x7F; + doubleTerm = q<<1; + rem = + ((rem - (uint_fast64_t) doubleTerm * (uint32_t) (sigB>>32))<<28) + - (uint_fast64_t) doubleTerm * ((uint32_t) sigB>>4); + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + sigZ -= 1<<7; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infinity: + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_eq.c b/vendor/riscv-isa-sim/softfloat/f64_eq.c new file mode 100644 index 00000000..ccb602a3 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_eq.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_eq( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c b/vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c new file mode 100644 index 00000000..ee5a4414 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_eq_signaling.c @@ -0,0 +1,61 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
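f64_eq above treats two encodings as equal when the bit patterns match or when both operands are zeros of either sign; OR-ing the two patterns and masking off the sign bit detects the latter case. A plain-C illustration of that zero check:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t bits_of(double d) { uint64_t u; memcpy(&u, &d, sizeof u); return u; }

    int main(void)
    {
        uint64_t a = bits_of(0.0), b = bits_of(-0.0);
        int eq = (a == b) || !((a | b) & UINT64_C(0x7FFFFFFFFFFFFFFF));
        printf("+0 == -0 ? %d\n", eq);   /* prints 1, matching IEEE-754 equality */
        return 0;
    }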
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_eq_signaling( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_isSignalingNaN.c b/vendor/riscv-isa-sim/softfloat/f64_isSignalingNaN.c new file mode 100644 index 00000000..f55acb4a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_isSignalingNaN.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_isSignalingNaN( float64_t a ) +{ + union ui64_f64 uA; + + uA.f = a; + return softfloat_isSigNaNF64UI( uA.ui ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_le.c b/vendor/riscv-isa-sim/softfloat/f64_le.c new file mode 100644 index 00000000..91fc994a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_le.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_le( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_le_quiet.c b/vendor/riscv-isa-sim/softfloat/f64_le_quiet.c new file mode 100644 index 00000000..a5d332a5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_le_quiet.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. 
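The same-sign branch of f64_le above compares the raw encodings as unsigned integers and flips the outcome when both operands are negative. A small plain-C sketch of just that branch, assuming finite inputs that share a sign (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t bits_of(double d) { uint64_t u; memcpy(&u, &d, sizeof u); return u; }

    static int le_same_sign(double x, double y)
    {
        uint64_t a = bits_of(x), b = bits_of(y);
        int sign = (int) (a >> 63);          /* both operands share this sign */
        return (a == b) || (sign ^ (a < b)); /* larger encoding means smaller value when negative */
    }

    int main(void)
    {
        printf("%d %d\n", le_same_sign(-3.5, -2.0), le_same_sign(2.0, 1.5));
        /* expected: 1 0 */
        return 0;
    }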
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_le_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_lt.c b/vendor/riscv-isa-sim/softfloat/f64_lt.c new file mode 100644 index 00000000..abf62fd3 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_lt.c @@ -0,0 +1,67 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +bool f64_lt( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c b/vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c new file mode 100644 index 00000000..6531f577 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_lt_quiet.c @@ -0,0 +1,72 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +bool f64_lt_quiet( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signA, signB; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { + if ( + softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return false; + } + signA = signF64UI( uiA ); + signB = signF64UI( uiB ); + return + (signA != signB) + ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_mul.c b/vendor/riscv-isa-sim/softfloat/f64_mul.c new file mode 100644 index 00000000..86f66545 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_mul.c @@ -0,0 +1,150 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
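A hypothetical usage sketch contrasting the signaling and quiet comparisons added above, assuming softfloat.h from this patch and the float64_t raw-bits member v: f64_lt raises the invalid flag for any NaN operand, while f64_lt_quiet raises it only for signaling NaNs, so a quiet NaN leaves the flags untouched.

    #include <stdint.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float64_t one, qnan;
        one.v  = UINT64_C(0x3FF0000000000000);   /* 1.0 */
        qnan.v = UINT64_C(0x7FF8000000000000);   /* a quiet NaN */

        softfloat_exceptionFlags = 0;
        (void) f64_lt_quiet(one, qnan);
        printf("quiet: invalid=%d\n",
               (softfloat_exceptionFlags & softfloat_flag_invalid) != 0);   /* 0 */

        softfloat_exceptionFlags = 0;
        (void) f64_lt(one, qnan);
        printf("signaling: invalid=%d\n",
               (softfloat_exceptionFlags & softfloat_flag_invalid) != 0);   /* 1 */
        return 0;
    }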
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_mul( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signZ; + uint_fast64_t magBits; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; +#ifdef SOFTFLOAT_FAST_INT64 + struct uint128 sig128Z; +#else + uint32_t sig128Z[4]; +#endif + uint_fast64_t sigZ, uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signZ = signA ^ signB; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN; + magBits = expB | sigB; + goto infArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + magBits = expA | sigA; + goto infArg; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zero; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FF; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11; +#ifdef SOFTFLOAT_FAST_INT64 + sig128Z = softfloat_mul64To128( sigA, sigB ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); +#else + softfloat_mul64To128M( sigA, sigB, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 | sig128Z[indexWord( 4, 2 )]; + if ( sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] ) sigZ |= 1; +#endif + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infArg: + if ( ! 
magBits ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + } else { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + } + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zero: + uiZ = packToF64UI( signZ, 0, 0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_mulAdd.c b/vendor/riscv-isa-sim/softfloat/f64_mulAdd.c new file mode 100644 index 00000000..67fc44d3 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_mulAdd.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + union ui64_f64 uB; + uint_fast64_t uiB; + union ui64_f64 uC; + uint_fast64_t uiC; + + uA.f = a; + uiA = uA.ui; + uB.f = b; + uiB = uB.ui; + uC.f = c; + uiC = uC.ui; + return softfloat_mulAddF64( uiA, uiB, uiC, 0 ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_rem.c b/vendor/riscv-isa-sim/softfloat/f64_rem.c new file mode 100644 index 00000000..e9174554 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_rem.c @@ -0,0 +1,189 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_rem( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + union ui64_f64 uB; + uint_fast64_t uiB; + int_fast16_t expB; + uint_fast64_t sigB; + struct exp16_sig64 normExpSig; + uint64_t rem; + int_fast16_t expDiff; + uint32_t q, recip32; + uint_fast64_t q64; + uint64_t altRem, meanRem; + bool signRem; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN; + goto invalid; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA < expB - 1 ) return a; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expB ) { + if ( ! sigB ) goto invalid; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + if ( ! expA ) { + if ( ! 
sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + rem = sigA | UINT64_C( 0x0010000000000000 ); + sigB |= UINT64_C( 0x0010000000000000 ); + expDiff = expA - expB; + if ( expDiff < 1 ) { + if ( expDiff < -1 ) return a; + sigB <<= 9; + if ( expDiff ) { + rem <<= 8; + q = 0; + } else { + rem <<= 9; + q = (sigB <= rem); + if ( q ) rem -= sigB; + } + } else { + recip32 = softfloat_approxRecip32_1( sigB>>21 ); + /*-------------------------------------------------------------------- + | Changing the shift of `rem' here requires also changing the initial + | subtraction from `expDiff'. + *--------------------------------------------------------------------*/ + rem <<= 9; + expDiff -= 30; + /*-------------------------------------------------------------------- + | The scale of `sigB' affects how many bits are obtained during each + | cycle of the loop. Currently this is 29 bits per loop iteration, + | the maximum possible. + *--------------------------------------------------------------------*/ + sigB <<= 9; + for (;;) { + q64 = (uint32_t) (rem>>32) * (uint_fast64_t) recip32; + if ( expDiff < 0 ) break; + q = (q64 + 0x80000000)>>32; +#ifdef SOFTFLOAT_FAST_INT64 + rem <<= 29; +#else + rem = (uint_fast64_t) (uint32_t) (rem>>3)<<32; +#endif + rem -= q * (uint64_t) sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) rem += sigB; + expDiff -= 29; + } + /*-------------------------------------------------------------------- + | (`expDiff' cannot be less than -29 here.) + *--------------------------------------------------------------------*/ + q = (uint32_t) (q64>>32)>>(~expDiff & 31); + rem = (rem<<(expDiff + 30)) - q * (uint64_t) sigB; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + altRem = rem + sigB; + goto selectRem; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + do { + altRem = rem; + ++q; + rem -= sigB; + } while ( ! (rem & UINT64_C( 0x8000000000000000 )) ); + selectRem: + meanRem = rem + altRem; + if ( + (meanRem & UINT64_C( 0x8000000000000000 )) || (! meanRem && (q & 1)) + ) { + rem = altRem; + } + signRem = signA; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + signRem = ! signRem; + rem = -rem; + } + return softfloat_normRoundPackToF64( signRem, expB, rem ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto uiZ; + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_roundToInt.c b/vendor/riscv-isa-sim/softfloat/f64_roundToInt.c new file mode 100644 index 00000000..7f810070 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_roundToInt.c @@ -0,0 +1,112 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. 
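f64_rem above computes the IEEE-754 remainder, in which the quotient is rounded to the nearest integer (ties to even), so the result can be negative even for positive operands; this differs from fmod, which truncates the quotient. A plain-C reference using the standard library (link with -lm), not SoftFloat code:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* 5.0 / 3.0 is about 1.67; the nearest integer quotient is 2, so the remainder is -1 */
        printf("remainder(5,3) = %g, fmod(5,3) = %g\n",
               remainder(5.0, 3.0), fmod(5.0, 3.0));   /* prints -1 and 2 */
        return 0;
    }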
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_roundToInt( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t uiZ, lastBitMask, roundBitsMask; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp <= 0x3FE ) { + if ( ! (uiA & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) return a; + if ( exact ) softfloat_exceptionFlags |= softfloat_flag_inexact; + uiZ = uiA & packToF64UI( 1, 0, 0 ); + switch ( roundingMode ) { + case softfloat_round_near_even: + if ( ! fracF64UI( uiA ) ) break; + case softfloat_round_near_maxMag: + if ( exp == 0x3FE ) uiZ |= packToF64UI( 0, 0x3FF, 0 ); + break; + case softfloat_round_min: + if ( uiZ ) uiZ = packToF64UI( 1, 0x3FF, 0 ); + break; + case softfloat_round_max: + if ( ! uiZ ) uiZ = packToF64UI( 0, 0x3FF, 0 ); + break; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x433 <= exp ) { + if ( (exp == 0x7FF) && fracF64UI( uiA ) ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + return a; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uiZ = uiA; + lastBitMask = (uint_fast64_t) 1<<(0x433 - exp); + roundBitsMask = lastBitMask - 1; + if ( roundingMode == softfloat_round_near_maxMag ) { + uiZ += lastBitMask>>1; + } else if ( roundingMode == softfloat_round_near_even ) { + uiZ += lastBitMask>>1; + if ( ! 
(uiZ & roundBitsMask) ) uiZ &= ~lastBitMask; + } else if ( + roundingMode + == (signF64UI( uiZ ) ? softfloat_round_min : softfloat_round_max) + ) { + uiZ += roundBitsMask; + } + uiZ &= ~roundBitsMask; + if ( exact && (uiZ != uiA) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_sqrt.c b/vendor/riscv-isa-sim/softfloat/f64_sqrt.c new file mode 100644 index 00000000..9a06cfad --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_sqrt.c @@ -0,0 +1,133 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t f64_sqrt( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + int_fast16_t expA; + uint_fast64_t sigA, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig32A, recipSqrt32, sig32Z; + uint_fast64_t rem; + uint32_t q; + uint_fast64_t sigZ, shiftedSigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA ) { + uiZ = softfloat_propagateNaNF64UI( uiA, 0 ); + goto uiZ; + } + if ( ! 
signA ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signA ) { + if ( ! (expA | sigA) ) return a; + goto invalid; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) return a; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + /*------------------------------------------------------------------------ + | (`sig32Z' is guaranteed to be a lower bound on the square root of + | `sig32A', which makes `sig32Z' also a lower bound on the square root of + | `sigA'.) + *------------------------------------------------------------------------*/ + expZ = ((expA - 0x3FF)>>1) + 0x3FE; + expA &= 1; + sigA |= UINT64_C( 0x0010000000000000 ); + sig32A = sigA>>21; + recipSqrt32 = softfloat_approxRecipSqrt32_1( expA, sig32A ); + sig32Z = ((uint_fast64_t) sig32A * recipSqrt32)>>32; + if ( expA ) { + sigA <<= 8; + sig32Z >>= 1; + } else { + sigA <<= 9; + } + rem = sigA - (uint_fast64_t) sig32Z * sig32Z; + q = ((uint32_t) (rem>>2) * (uint_fast64_t) recipSqrt32)>>32; + sigZ = ((uint_fast64_t) sig32Z<<32 | 1<<5) + ((uint_fast64_t) q<<3); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( (sigZ & 0x1FF) < 0x22 ) { + sigZ &= ~(uint_fast64_t) 0x3F; + shiftedSigZ = sigZ>>6; + rem = (sigA<<52) - shiftedSigZ * shiftedSigZ; + if ( rem & UINT64_C( 0x8000000000000000 ) ) { + --sigZ; + } else { + if ( rem ) sigZ |= 1; + } + } + return softfloat_roundPackToF64( 0, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_sub.c b/vendor/riscv-isa-sim/softfloat/f64_sub.c new file mode 100644 index 00000000..0e990cd1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_sub.c @@ -0,0 +1,74 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t f64_sub( float64_t a, float64_t b ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool signA; + union ui64_f64 uB; + uint_fast64_t uiB; + bool signB; +#if ! defined INLINE_LEVEL || (INLINE_LEVEL < 2) + float64_t (*magsFuncPtr)( uint_fast64_t, uint_fast64_t, bool ); +#endif + + uA.f = a; + uiA = uA.ui; + signA = signF64UI( uiA ); + uB.f = b; + uiB = uB.ui; + signB = signF64UI( uiB ); +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) + if ( signA == signB ) { + return softfloat_subMagsF64( uiA, uiB, signA ); + } else { + return softfloat_addMagsF64( uiA, uiB, signA ); + } +#else + magsFuncPtr = + (signA == signB) ? softfloat_subMagsF64 : softfloat_addMagsF64; + return (*magsFuncPtr)( uiA, uiB, signA ); +#endif + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_f128.c b/vendor/riscv-isa-sim/softfloat/f64_to_f128.c new file mode 100644 index 00000000..92c2d560 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_f128.c @@ -0,0 +1,98 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t f64_to_f128( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + struct uint128 uiZ; + struct exp16_sig64 normExpSig; + struct uint128 frac128; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF128UI( &commonNaN ); + } else { + uiZ.v64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ.v0 = 0; + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! exp ) { + if ( ! frac ) { + uiZ.v64 = packToF128UI64( sign, 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + normExpSig = softfloat_normSubnormalF64Sig( frac ); + exp = normExpSig.exp - 1; + frac = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac128 = softfloat_shortShiftLeft128( 0, frac, 60 ); + uiZ.v64 = packToF128UI64( sign, exp + 0x3C00, frac128.v64 ); + uiZ.v0 = frac128.v0; + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_f16.c b/vendor/riscv-isa-sim/softfloat/f64_to_f16.c new file mode 100644 index 00000000..325788c6 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_f16.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t f64_to_f16( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + uint_fast16_t uiZ, frac16; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF16UI( &commonNaN ); + } else { + uiZ = packToF16UI( sign, 0x1F, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac16 = softfloat_shortShiftRightJam64( frac, 38 ); + if ( ! (exp | frac16) ) { + uiZ = packToF16UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF16( sign, exp - 0x3F1, frac16 | 0x4000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_f32.c b/vendor/riscv-isa-sim/softfloat/f64_to_f32.c new file mode 100644 index 00000000..99b13dda --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_f32.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t f64_to_f32( float64_t a ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t frac; + struct commonNaN commonNaN; + uint_fast32_t uiZ, frac32; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + frac = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp == 0x7FF ) { + if ( frac ) { + softfloat_f64UIToCommonNaN( uiA, &commonNaN ); + uiZ = softfloat_commonNaNToF32UI( &commonNaN ); + } else { + uiZ = packToF32UI( sign, 0xFF, 0 ); + } + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + frac32 = softfloat_shortShiftRightJam64( frac, 22 ); + if ( ! (exp | frac32) ) { + uiZ = packToF32UI( sign, 0, 0 ); + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + return softfloat_roundPackToF32( sign, exp - 0x381, frac32 | 0x40000000 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_i32.c b/vendor/riscv-isa-sim/softfloat/f64_to_i32.c new file mode 100644 index 00000000..8712c0ac --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_i32.c @@ -0,0 +1,82 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f64_to_i32( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (i32_fromNaN != i32_fromPosOverflow) || (i32_fromNaN != i32_fromNegOverflow) + if ( (exp == 0x7FF) && sig ) { +#if (i32_fromNaN == i32_fromPosOverflow) + sign = 0; +#elif (i32_fromNaN == i32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return i32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToI32( sign, sig, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c new file mode 100644 index 00000000..b7e1e030 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_i32_r_minMag.c @@ -0,0 +1,96 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t f64_to_i32_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + int_fast32_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( shiftDist < 22 ) { + if ( + sign && (exp == 0x41E) && (sig < UINT64_C( 0x0000000000200000 )) + ) { + if ( exact && sig ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return -0x7FFFFFFF - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? i32_fromNaN + : sign ? 
i32_fromNegOverflow : i32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig>>shiftDist; + if ( exact && ((uint_fast64_t) (uint_fast32_t) absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f64_to_i64( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; +#ifdef SOFTFLOAT_FAST_INT64 + struct uint64_extra sigExtra; +#else + uint32_t extSig[3]; +#endif + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x433 - exp; +#ifdef SOFTFLOAT_FAST_INT64 + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sigExtra.v = sig<<-shiftDist; + sigExtra.extra = 0; + } else { + sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist ); + } + return + softfloat_roundToI64( + sign, sigExtra.v, sigExtra.extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sig <<= -shiftDist; + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + } else { + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + } + return softfloat_roundMToI64( sign, extSig, roundingMode, exact ); +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && fracF64UI( uiA ) ? i64_fromNaN + : sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c new file mode 100644 index 00000000..3822606d --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_i64_r_minMag.c @@ -0,0 +1,100 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t f64_to_i64_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + int_fast64_t absZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( shiftDist <= 0 ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( shiftDist < -10 ) { + if ( uiA == packToF64UI( 1, 0x43E, 0 ) ) { + return -INT64_C( 0x7FFFFFFFFFFFFFFF ) - 1; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? i64_fromNaN + : sign ? 
i64_fromNegOverflow : i64_fromPosOverflow; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig<<-shiftDist; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + absZ = sig>>shiftDist; + if ( exact && (absZ< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f64_to_ui32( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ +#if (ui32_fromNaN != ui32_fromPosOverflow) || (ui32_fromNaN != ui32_fromNegOverflow) + if ( (exp == 0x7FF) && sig ) { +#if (ui32_fromNaN == ui32_fromPosOverflow) + sign = 0; +#elif (ui32_fromNaN == ui32_fromNegOverflow) + sign = 1; +#else + softfloat_raiseFlags( softfloat_flag_invalid ); + return ui32_fromNaN; +#endif + } +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x427 - exp; + if ( 0 < shiftDist ) sig = softfloat_shiftRightJam64( sig, shiftDist ); + return softfloat_roundToUI32( sign, sig, roundingMode, exact ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c new file mode 100644 index 00000000..11f0b050 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_ui32_r_minMag.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t f64_to_ui32_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( sign || (shiftDist < 21) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? ui32_fromNaN + : sign ? 
ui32_fromNegOverflow : ui32_fromPosOverflow; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig |= UINT64_C( 0x0010000000000000 ); + z = sig>>shiftDist; + if ( exact && ((uint_fast64_t) z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f64_to_ui64( float64_t a, uint_fast8_t roundingMode, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + bool sign; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; +#ifdef SOFTFLOAT_FAST_INT64 + struct uint64_extra sigExtra; +#else + uint32_t extSig[3]; +#endif + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + sign = signF64UI( uiA ); + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( exp ) sig |= UINT64_C( 0x0010000000000000 ); + shiftDist = 0x433 - exp; +#ifdef SOFTFLOAT_FAST_INT64 + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sigExtra.v = sig<<-shiftDist; + sigExtra.extra = 0; + } else { + sigExtra = softfloat_shiftRightJam64Extra( sig, 0, shiftDist ); + } + return + softfloat_roundToUI64( + sign, sigExtra.v, sigExtra.extra, roundingMode, exact ); +#else + extSig[indexWord( 3, 0 )] = 0; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + sig <<= -shiftDist; + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + } else { + extSig[indexWord( 3, 2 )] = sig>>32; + extSig[indexWord( 3, 1 )] = sig; + softfloat_shiftRightJam96M( extSig, shiftDist, extSig ); + } + return softfloat_roundMToUI64( sign, extSig, roundingMode, exact ); +#endif + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && fracF64UI( uiA ) ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c b/vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c new file mode 100644 index 00000000..25918c48 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/f64_to_ui64_r_minMag.c @@ -0,0 +1,93 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t f64_to_ui64_r_minMag( float64_t a, bool exact ) +{ + union ui64_f64 uA; + uint_fast64_t uiA; + int_fast16_t exp; + uint_fast64_t sig; + int_fast16_t shiftDist; + bool sign; + uint_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + uA.f = a; + uiA = uA.ui; + exp = expF64UI( uiA ); + sig = fracF64UI( uiA ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 0x433 - exp; + if ( 53 <= shiftDist ) { + if ( exact && (exp | sig) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sign = signF64UI( uiA ); + if ( sign ) goto invalid; + if ( shiftDist <= 0 ) { + if ( shiftDist < -11 ) goto invalid; + z = (sig | UINT64_C( 0x0010000000000000 ))<<-shiftDist; + } else { + sig |= UINT64_C( 0x0010000000000000 ); + z = sig>>shiftDist; + if ( exact && (uint64_t) (sig<<(-shiftDist & 63)) ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return + (exp == 0x7FF) && sig ? ui64_fromNaN + : sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/fall_maxmin.c b/vendor/riscv-isa-sim/softfloat/fall_maxmin.c new file mode 100644 index 00000000..32a9ade5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/fall_maxmin.c @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#define COMPARE_MAX(a, b, bits) \ +float ## bits ## _t f ## bits ## _max( float ## bits ## _t a, float ## bits ## _t b ) \ +{ \ + bool greater = f ## bits ## _lt_quiet(b, a) || \ + (f ## bits ## _eq(b, a) && signF ## bits ## UI(b.v)); \ + \ + if (isNaNF ## bits ## UI(a.v) && isNaNF ## bits ## UI(b.v)) { \ + union ui ## bits ## _f ## bits ui; \ + ui.ui = defaultNaNF ## bits ## UI; \ + return ui.f; \ + } else { \ + return greater || isNaNF ## bits ## UI((b).v) ? a : b; \ + } \ +} + +#define COMPARE_MIN(a, b, bits) \ +float ## bits ## _t f ## bits ## _min( float ## bits ## _t a, float ## bits ## _t b ) \ +{ \ + bool less = f ## bits ## _lt_quiet(a, b) || \ + (f ## bits ## _eq(a, b) && signF ## bits ## UI(a.v)); \ + \ + if (isNaNF ## bits ## UI(a.v) && isNaNF ## bits ## UI(b.v)) { \ + union ui ## bits ## _f ## bits ui; \ + ui.ui = defaultNaNF ## bits ## UI; \ + return ui.f; \ + } else { \ + return less || isNaNF ## bits ## UI((b).v) ? a : b; \ + } \ +} + +COMPARE_MAX(a, b, 16); +COMPARE_MAX(a, b, 32); +COMPARE_MAX(a, b, 64); + +COMPARE_MIN(a, b, 16); +COMPARE_MIN(a, b, 32); +COMPARE_MIN(a, b, 64); diff --git a/vendor/riscv-isa-sim/softfloat/fall_reciprocal.c b/vendor/riscv-isa-sim/softfloat/fall_reciprocal.c new file mode 100644 index 00000000..1c964589 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/fall_reciprocal.c @@ -0,0 +1,392 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +static inline uint64_t extract64(uint64_t val, int pos, int len) +{ + assert(pos >= 0 && len > 0 && len <= 64 - pos); + return (val >> pos) & (~UINT64_C(0) >> (64 - len)); +} + +static inline uint64_t make_mask64(int pos, int len) +{ + assert(pos >= 0 && len > 0 && pos < 64 && len <= 64); + return (UINT64_MAX >> (64 - len)) << pos; +} + +//user needs to truncate output to required length +static inline uint64_t rsqrte7(uint64_t val, int e, int s, bool sub) { + uint64_t exp = extract64(val, s, e); + uint64_t sig = extract64(val, 0, s); + uint64_t sign = extract64(val, s + e, 1); + const int p = 7; + + static const uint8_t table[] = { + 52, 51, 50, 48, 47, 46, 44, 43, + 42, 41, 40, 39, 38, 36, 35, 34, + 33, 32, 31, 30, 30, 29, 28, 27, + 26, 25, 24, 23, 23, 22, 21, 20, + 19, 19, 18, 17, 16, 16, 15, 14, + 14, 13, 12, 12, 11, 10, 10, 9, + 9, 8, 7, 7, 6, 6, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0, + 127, 125, 123, 121, 119, 118, 116, 114, + 113, 111, 109, 108, 106, 105, 103, 102, + 100, 99, 97, 96, 95, 93, 92, 91, + 90, 88, 87, 86, 85, 84, 83, 82, + 80, 79, 78, 77, 76, 75, 74, 73, + 72, 71, 70, 70, 69, 68, 67, 66, + 65, 64, 63, 63, 62, 61, 60, 59, + 59, 58, 57, 56, 56, 55, 54, 53}; + + if (sub) { + while (extract64(sig, s - 1, 1) == 0) + exp--, sig <<= 1; + + sig = (sig << 1) & make_mask64(0 ,s); + } + + int idx = ((exp & 1) << (p-1)) | (sig >> (s-p+1)); + uint64_t out_sig = (uint64_t)(table[idx]) << (s-p); + uint64_t out_exp = (3 * make_mask64(0, e - 1) + ~exp) / 2; + + return (sign << (s+e)) | (out_exp << s) | out_sig; +} + +float16_t f16_rsqrte7(float16_t in) +{ + union ui16_f16 uA; + + uA.f = in; + unsigned int ret = f16_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF16UI; + break; + case 0x008: // -0 + uA.ui = 0xfc00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7c00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + 
default: // +num + uA.ui = rsqrte7(uA.ui, 5, 10, sub); + break; + } + + return uA.f; +} + +float32_t f32_rsqrte7(float32_t in) +{ + union ui32_f32 uA; + + uA.f = in; + unsigned int ret = f32_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF32UI; + break; + case 0x008: // -0 + uA.ui = 0xff800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7f800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + default: // +num + uA.ui = rsqrte7(uA.ui, 8, 23, sub); + break; + } + + return uA.f; +} + +float64_t f64_rsqrte7(float64_t in) +{ + union ui64_f64 uA; + + uA.f = in; + unsigned int ret = f64_classify(in); + bool sub = false; + switch(ret) { + case 0x001: // -inf + case 0x002: // -normal + case 0x004: // -subnormal + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF64UI; + break; + case 0x008: // -0 + uA.ui = 0xfff0000000000000ul; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7ff0000000000000ul; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x020: //+ sub + sub = true; + default: // +num + uA.ui = rsqrte7(uA.ui, 11, 52, sub); + break; + } + + return uA.f; +} + +//user needs to truncate output to required length +static inline uint64_t recip7(uint64_t val, int e, int s, int rm, bool sub, + bool *round_abnormal) +{ + uint64_t exp = extract64(val, s, e); + uint64_t sig = extract64(val, 0, s); + uint64_t sign = extract64(val, s + e, 1); + const int p = 7; + + static const uint8_t table[] = { + 127, 125, 123, 121, 119, 117, 116, 114, + 112, 110, 109, 107, 105, 104, 102, 100, + 99, 97, 96, 94, 93, 91, 90, 88, + 87, 85, 84, 83, 81, 80, 79, 77, + 76, 75, 74, 72, 71, 70, 69, 68, + 66, 65, 64, 63, 62, 61, 60, 59, + 58, 57, 56, 55, 54, 53, 52, 51, + 50, 49, 48, 47, 46, 45, 44, 43, + 42, 41, 40, 40, 39, 38, 37, 36, + 35, 35, 34, 33, 32, 31, 31, 30, + 29, 28, 28, 27, 26, 25, 25, 24, + 23, 23, 22, 21, 21, 20, 19, 19, + 18, 17, 17, 16, 15, 15, 14, 14, + 13, 12, 12, 11, 11, 10, 9, 9, + 8, 8, 7, 7, 6, 5, 5, 4, + 4, 3, 3, 2, 2, 1, 1, 0}; + + if (sub) { + while (extract64(sig, s - 1, 1) == 0) + exp--, sig <<= 1; + + sig = (sig << 1) & make_mask64(0 ,s); + + if (exp != 0 && exp != UINT64_MAX) { + *round_abnormal = true; + if (rm == 1 || + (rm == 2 && !sign) || + (rm == 3 && sign)) + return ((sign << (s+e)) | make_mask64(s, e)) - 1; + else + return (sign << (s+e)) | make_mask64(s, e); + } + } + + int idx = sig >> (s-p); + uint64_t out_sig = (uint64_t)(table[idx]) << (s-p); + uint64_t out_exp = 2 * make_mask64(0, e - 1) + ~exp; + if (out_exp == 0 || out_exp == UINT64_MAX) { + out_sig = (out_sig >> 1) | make_mask64(s - 1, 1); + if (out_exp == UINT64_MAX) { + out_sig >>= 1; + out_exp = 0; + } + } + + return (sign << (s+e)) | (out_exp << s) | out_sig; +} + +float16_t f16_recip7(float16_t in) +{ + union ui16_f16 uA; + + uA.f = in; + unsigned int ret = f16_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x8000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xfc00; + softfloat_exceptionFlags |= 
softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7c00; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF16UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 5, 10, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} + +float32_t f32_recip7(float32_t in) +{ + union ui32_f32 uA; + + uA.f = in; + unsigned int ret = f32_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x80000000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xff800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7f800000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF32UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 8, 23, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} + +float64_t f64_recip7(float64_t in) +{ + union ui64_f64 uA; + + uA.f = in; + unsigned int ret = f64_classify(in); + bool sub = false; + bool round_abnormal = false; + switch(ret) { + case 0x001: // -inf + uA.ui = 0x8000000000000000; + break; + case 0x080: //+inf + uA.ui = 0x0; + break; + case 0x008: // -0 + uA.ui = 0xfff0000000000000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x010: // +0 + uA.ui = 0x7ff0000000000000; + softfloat_exceptionFlags |= softfloat_flag_infinite; + break; + case 0x100: // sNaN + softfloat_exceptionFlags |= softfloat_flag_invalid; + case 0x200: //qNaN + uA.ui = defaultNaNF64UI; + break; + case 0x004: // -subnormal + case 0x020: //+ sub + sub = true; + default: // +- normal + uA.ui = recip7(uA.ui, 11, 52, + softfloat_roundingMode, sub, &round_abnormal); + if (round_abnormal) + softfloat_exceptionFlags |= softfloat_flag_inexact | + softfloat_flag_overflow; + break; + } + + return uA.f; +} diff --git a/vendor/riscv-isa-sim/softfloat/i32_to_f128.c b/vendor/riscv-isa-sim/softfloat/i32_to_f128.c new file mode 100644 index 00000000..af7268ae --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/i32_to_f128.c @@ -0,0 +1,64 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t i32_to_f128( int32_t a ) +{ + uint_fast64_t uiZ64; + bool sign; + uint_fast32_t absA; + int_fast8_t shiftDist; + union ui128_f128 uZ; + + uiZ64 = 0; + if ( a ) { + sign = (a < 0); + absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a; + shiftDist = softfloat_countLeadingZeros32( absA ) + 17; + uiZ64 = + packToF128UI64( + sign, 0x402E - shiftDist, (uint_fast64_t) absA< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t i32_to_f16( int32_t a ) +{ + bool sign; + uint_fast32_t absA; + int_fast8_t shiftDist; + union ui16_f16 u; + uint_fast16_t sig; + + sign = (a < 0); + absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a; + shiftDist = softfloat_countLeadingZeros32( absA ) - 21; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF16UI( + sign, 0x18 - shiftDist, (uint_fast16_t) absA<>(-shiftDist) + | ((uint32_t) (absA<<(shiftDist & 31)) != 0) + : (uint_fast16_t) absA< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t i32_to_f32( int32_t a ) +{ + bool sign; + union ui32_f32 uZ; + uint_fast32_t absA; + + sign = (a < 0); + if ( ! (a & 0x7FFFFFFF) ) { + uZ.ui = sign ? packToF32UI( 1, 0x9E, 0 ) : 0; + return uZ.f; + } + absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a; + return softfloat_normRoundPackToF32( sign, 0x9C, absA ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/i32_to_f64.c b/vendor/riscv-isa-sim/softfloat/i32_to_f64.c new file mode 100644 index 00000000..d3901eb4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/i32_to_f64.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t i32_to_f64( int32_t a ) +{ + uint_fast64_t uiZ; + bool sign; + uint_fast32_t absA; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + if ( ! a ) { + uiZ = 0; + } else { + sign = (a < 0); + absA = sign ? -(uint_fast32_t) a : (uint_fast32_t) a; + shiftDist = softfloat_countLeadingZeros32( absA ) + 21; + uiZ = + packToF64UI( + sign, 0x432 - shiftDist, (uint_fast64_t) absA< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t i64_to_f128( int64_t a ) +{ + uint_fast64_t uiZ64, uiZ0; + bool sign; + uint_fast64_t absA; + int_fast8_t shiftDist; + struct uint128 zSig; + union ui128_f128 uZ; + + if ( ! a ) { + uiZ64 = 0; + uiZ0 = 0; + } else { + sign = (a < 0); + absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a; + shiftDist = softfloat_countLeadingZeros64( absA ) + 49; + if ( 64 <= shiftDist ) { + zSig.v64 = absA<<(shiftDist - 64); + zSig.v0 = 0; + } else { + zSig = softfloat_shortShiftLeft128( 0, absA, shiftDist ); + } + uiZ64 = packToF128UI64( sign, 0x406E - shiftDist, zSig.v64 ); + uiZ0 = zSig.v0; + } + uZ.ui.v64 = uiZ64; + uZ.ui.v0 = uiZ0; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/i64_to_f16.c b/vendor/riscv-isa-sim/softfloat/i64_to_f16.c new file mode 100644 index 00000000..56f01912 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/i64_to_f16.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
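
The integer-to-double path above reduces to a count of leading zeros, a left shift that lands the most significant bit of the magnitude on bit 52, and the constant 0x432 minus the shift distance for the exponent field; the implicit bit then carries into the exponent when packToF64UI adds the significand. The following is a small standalone check of that arithmetic, not part of the vendored sources: packToF64UI is re-created inline and __builtin_clz (a GCC/Clang builtin) stands in for softfloat_countLeadingZeros32.

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint64_t demo_i32_to_f64_bits(int32_t a)
{
    if (a == 0) return 0;
    int sign = (a < 0);
    uint32_t absA = sign ? -(uint32_t)a : (uint32_t)a;
    int clz = __builtin_clz(absA);               /* GCC/Clang builtin, assumed */
    int shiftDist = clz + 21;                    /* lands the MSB of absA on bit 52 */
    uint64_t sig = (uint64_t)absA << shiftDist;  /* implicit bit included in sig */
    return ((uint64_t)sign << 63) + ((uint64_t)(0x432 - shiftDist) << 52) + sig;
}

int main(void)
{
    int32_t tests[] = { 0, 1, -1, 3, 100, -12345, INT32_MAX, INT32_MIN };
    for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++) {
        double d = (double)tests[i];   /* every int32_t is exactly representable */
        uint64_t ref;
        memcpy(&ref, &d, sizeof ref);
        assert(demo_i32_to_f64_bits(tests[i]) == ref);
    }
    return 0;
}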
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t i64_to_f16( int64_t a ) +{ + bool sign; + uint_fast64_t absA; + int_fast8_t shiftDist; + union ui16_f16 u; + uint_fast16_t sig; + + sign = (a < 0); + absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a; + shiftDist = softfloat_countLeadingZeros64( absA ) - 53; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF16UI( + sign, 0x18 - shiftDist, (uint_fast16_t) absA< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t i64_to_f32( int64_t a ) +{ + bool sign; + uint_fast64_t absA; + int_fast8_t shiftDist; + union ui32_f32 u; + uint_fast32_t sig; + + sign = (a < 0); + absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a; + shiftDist = softfloat_countLeadingZeros64( absA ) - 40; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF32UI( + sign, 0x95 - shiftDist, (uint_fast32_t) absA< +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t i64_to_f64( int64_t a ) +{ + bool sign; + union ui64_f64 uZ; + uint_fast64_t absA; + + sign = (a < 0); + if ( ! (a & UINT64_C( 0x7FFFFFFFFFFFFFFF )) ) { + uZ.ui = sign ? packToF64UI( 1, 0x43E, 0 ) : 0; + return uZ.f; + } + absA = sign ? -(uint_fast64_t) a : (uint_fast64_t) a; + return softfloat_normRoundPackToF64( sign, 0x43C, absA ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/internals.h b/vendor/riscv-isa-sim/softfloat/internals.h new file mode 100644 index 00000000..55585e96 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/internals.h @@ -0,0 +1,286 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef internals_h +#define internals_h 1 + +#include +#include +#include "primitives.h" +#include "softfloat_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +union ui16_f16 { uint16_t ui; float16_t f; }; +union ui32_f32 { uint32_t ui; float32_t f; }; +union ui64_f64 { uint64_t ui; float64_t f; }; + +#ifdef SOFTFLOAT_FAST_INT64 +union extF80M_extF80 { struct extFloat80M fM; extFloat80_t f; }; +union ui128_f128 { struct uint128 ui; float128_t f; }; +#endif + +enum { + softfloat_mulAdd_subC = 1, + softfloat_mulAdd_subProd = 2 +}; + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +uint_fast32_t softfloat_roundToUI32( bool, uint_fast64_t, uint_fast8_t, bool ); + +#ifdef SOFTFLOAT_FAST_INT64 +uint_fast64_t + softfloat_roundToUI64( + bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool ); +#else +uint_fast64_t softfloat_roundMToUI64( bool, uint32_t *, uint_fast8_t, bool ); +#endif + +int_fast32_t softfloat_roundToI32( bool, uint_fast64_t, uint_fast8_t, bool ); + +#ifdef SOFTFLOAT_FAST_INT64 +int_fast64_t + softfloat_roundToI64( + bool, uint_fast64_t, uint_fast64_t, uint_fast8_t, bool ); +#else +int_fast64_t softfloat_roundMToI64( bool, uint32_t *, uint_fast8_t, bool ); +#endif + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF16UI( a ) ((bool) ((uint16_t) (a)>>15)) +#define expF16UI( a ) ((int_fast8_t) ((a)>>10) & 0x1F) +#define fracF16UI( a ) ((a) & 0x03FF) +#define packToF16UI( sign, exp, sig ) (((uint16_t) (sign)<<15) + ((uint16_t) (exp)<<10) + (sig)) + +#define isNaNF16UI( a ) (((~(a) & 0x7C00) == 0) && ((a) & 0x03FF)) + +struct exp8_sig16 { int_fast8_t exp; uint_fast16_t sig; }; +struct exp8_sig16 softfloat_normSubnormalF16Sig( uint_fast16_t ); + +float16_t softfloat_roundPackToF16( bool, int_fast16_t, uint_fast16_t ); +float16_t softfloat_normRoundPackToF16( bool, int_fast16_t, uint_fast16_t ); + +float16_t softfloat_addMagsF16( uint_fast16_t, uint_fast16_t ); +float16_t softfloat_subMagsF16( uint_fast16_t, uint_fast16_t ); +float16_t + softfloat_mulAddF16( + uint_fast16_t, uint_fast16_t, uint_fast16_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF32UI( a ) ((bool) ((uint32_t) (a)>>31)) +#define expF32UI( a ) ((int_fast16_t) ((a)>>23) & 0xFF) +#define fracF32UI( a ) ((a) & 0x007FFFFF) +#define packToF32UI( sign, exp, sig ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<23) + 
(sig)) + +#define isNaNF32UI( a ) (((~(a) & 0x7F800000) == 0) && ((a) & 0x007FFFFF)) + +struct exp16_sig32 { int_fast16_t exp; uint_fast32_t sig; }; +struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t ); + +float32_t softfloat_roundPackToF32( bool, int_fast16_t, uint_fast32_t ); +float32_t softfloat_normRoundPackToF32( bool, int_fast16_t, uint_fast32_t ); + +float32_t softfloat_addMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t softfloat_subMagsF32( uint_fast32_t, uint_fast32_t ); +float32_t + softfloat_mulAddF32( + uint_fast32_t, uint_fast32_t, uint_fast32_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF64UI( a ) ((bool) ((uint64_t) (a)>>63)) +#define expF64UI( a ) ((int_fast16_t) ((a)>>52) & 0x7FF) +#define fracF64UI( a ) ((a) & UINT64_C( 0x000FFFFFFFFFFFFF )) +#define packToF64UI( sign, exp, sig ) ((uint64_t) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<52) + (sig))) + +#define isNaNF64UI( a ) (((~(a) & UINT64_C( 0x7FF0000000000000 )) == 0) && ((a) & UINT64_C( 0x000FFFFFFFFFFFFF ))) + +struct exp16_sig64 { int_fast16_t exp; uint_fast64_t sig; }; +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t ); + +float64_t softfloat_roundPackToF64( bool, int_fast16_t, uint_fast64_t ); +float64_t softfloat_normRoundPackToF64( bool, int_fast16_t, uint_fast64_t ); + +float64_t softfloat_addMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t softfloat_subMagsF64( uint_fast64_t, uint_fast64_t, bool ); +float64_t + softfloat_mulAddF64( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signExtF80UI64( a64 ) ((bool) ((uint16_t) (a64)>>15)) +#define expExtF80UI64( a64 ) ((a64) & 0x7FFF) +#define packToExtF80UI64( sign, exp ) ((uint_fast16_t) (sign)<<15 | (exp)) + +#define isNaNExtF80UI( a64, a0 ) ((((a64) & 0x7FFF) == 0x7FFF) && ((a0) & UINT64_C( 0x7FFFFFFFFFFFFFFF ))) + +#ifdef SOFTFLOAT_FAST_INT64 + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ + +struct exp32_sig64 { int_fast32_t exp; uint64_t sig; }; +struct exp32_sig64 softfloat_normSubnormalExtF80Sig( uint_fast64_t ); + +extFloat80_t + softfloat_roundPackToExtF80( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); +extFloat80_t + softfloat_normRoundPackToExtF80( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t, uint_fast8_t ); + +extFloat80_t + softfloat_addMagsExtF80( + uint_fast16_t, uint_fast64_t, uint_fast16_t, uint_fast64_t, bool ); +extFloat80_t + softfloat_subMagsExtF80( + uint_fast16_t, uint_fast64_t, uint_fast16_t, uint_fast64_t, bool ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF128UI64( a64 ) ((bool) ((uint64_t) (a64)>>63)) +#define expF128UI64( a64 ) ((int_fast32_t) ((a64)>>48) & 0x7FFF) +#define fracF128UI64( a64 ) ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )) +#define packToF128UI64( sign, exp, sig64 ) (((uint_fast64_t) (sign)<<63) + ((uint_fast64_t) (exp)<<48) + (sig64)) + +#define isNaNF128UI( a64, a0 ) (((~(a64) & UINT64_C( 0x7FFF000000000000 )) == 0) && (a0 || ((a64) & UINT64_C( 0x0000FFFFFFFFFFFF )))) 
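
The field macros above encode the packing convention used throughout: sign in the top bit, biased exponent next, trailing significand at the bottom. The following is a minimal standalone round-trip against 1.0f; the DEMO_ macros restate the F32 forms above and are this sketch's names, not part of the vendored sources.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DEMO_packToF32UI(sign, exp, sig) \
    (((uint32_t)(sign) << 31) + ((uint32_t)(exp) << 23) + (sig))
#define DEMO_expF32UI(a)  ((int)((a) >> 23) & 0xFF)
#define DEMO_fracF32UI(a) ((a) & 0x007FFFFF)

int main(void)
{
    float one = 1.0f;
    uint32_t bits;
    memcpy(&bits, &one, sizeof bits);
    assert(bits == DEMO_packToF32UI(0, 0x7F, 0));   /* 1.0f: biased exp 127, zero fraction */
    assert(DEMO_expF32UI(bits) == 0x7F && DEMO_fracF32UI(bits) == 0);
    return 0;
}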
+ +struct exp32_sig128 { int_fast32_t exp; struct uint128 sig; }; +struct exp32_sig128 + softfloat_normSubnormalF128Sig( uint_fast64_t, uint_fast64_t ); + +float128_t + softfloat_roundPackToF128( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t, uint_fast64_t ); +float128_t + softfloat_normRoundPackToF128( + bool, int_fast32_t, uint_fast64_t, uint_fast64_t ); + +float128_t + softfloat_addMagsF128( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +float128_t + softfloat_subMagsF128( + uint_fast64_t, uint_fast64_t, uint_fast64_t, uint_fast64_t, bool ); +float128_t + softfloat_mulAddF128( + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast64_t, + uint_fast8_t + ); + +#else + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ + +bool + softfloat_tryPropagateNaNExtF80M( + const struct extFloat80M *, + const struct extFloat80M *, + struct extFloat80M * + ); +void softfloat_invalidExtF80M( struct extFloat80M * ); + +int softfloat_normExtF80SigM( uint64_t * ); + +void + softfloat_roundPackMToExtF80M( + bool, int32_t, uint32_t *, uint_fast8_t, struct extFloat80M * ); +void + softfloat_normRoundPackMToExtF80M( + bool, int32_t, uint32_t *, uint_fast8_t, struct extFloat80M * ); + +void + softfloat_addExtF80M( + const struct extFloat80M *, + const struct extFloat80M *, + struct extFloat80M *, + bool + ); + +int + softfloat_compareNonnormExtF80M( + const struct extFloat80M *, const struct extFloat80M * ); + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define signF128UI96( a96 ) ((bool) ((uint32_t) (a96)>>31)) +#define expF128UI96( a96 ) ((int32_t) ((a96)>>16) & 0x7FFF) +#define fracF128UI96( a96 ) ((a96) & 0x0000FFFF) +#define packToF128UI96( sign, exp, sig96 ) (((uint32_t) (sign)<<31) + ((uint32_t) (exp)<<16) + (sig96)) + +bool softfloat_isNaNF128M( const uint32_t * ); + +bool + softfloat_tryPropagateNaNF128M( + const uint32_t *, const uint32_t *, uint32_t * ); +void softfloat_invalidF128M( uint32_t * ); + +int softfloat_shiftNormSigF128M( const uint32_t *, uint_fast8_t, uint32_t * ); + +void softfloat_roundPackMToF128M( bool, int32_t, uint32_t *, uint32_t * ); +void softfloat_normRoundPackMToF128M( bool, int32_t, uint32_t *, uint32_t * ); + +void + softfloat_addF128M( const uint32_t *, const uint32_t *, uint32_t *, bool ); +void + softfloat_mulAddF128M( + const uint32_t *, + const uint32_t *, + const uint32_t *, + uint32_t *, + uint_fast8_t + ); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/platform.h b/vendor/riscv-isa-sim/softfloat/platform.h new file mode 100644 index 00000000..55de1941 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/platform.h @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#include "config.h" +#ifndef WORDS_BIGENDIAN +#define LITTLEENDIAN 1 +#endif + +#define INLINE_LEVEL 5 +#define SOFTFLOAT_FAST_INT64 +#define SOFTFLOAT_FAST_DIV64TO32 +#define SOFTFLOAT_ROUND_ODD + +/*---------------------------------------------------------------------------- +*----------------------------------------------------------------------------*/ +#define INLINE static inline + diff --git a/vendor/riscv-isa-sim/softfloat/primitiveTypes.h b/vendor/riscv-isa-sim/softfloat/primitiveTypes.h new file mode 100644 index 00000000..b1120491 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/primitiveTypes.h @@ -0,0 +1,86 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef primitiveTypes_h +#define primitiveTypes_h 1 + +#include +#include "platform.h" + +#ifdef SOFTFLOAT_FAST_INT64 + +#ifdef LITTLEENDIAN +struct uint128 { uint64_t v0, v64; }; +struct uint64_extra { uint64_t extra, v; }; +struct uint128_extra { uint64_t extra; struct uint128 v; }; +#else +struct uint128 { uint64_t v64, v0; }; +struct uint64_extra { uint64_t v, extra; }; +struct uint128_extra { struct uint128 v; uint64_t extra; }; +#endif + +#endif + +/*---------------------------------------------------------------------------- +| These macros are used to isolate the differences in word order between big- +| endian and little-endian platforms. +*----------------------------------------------------------------------------*/ +#ifdef LITTLEENDIAN +#define wordIncr 1 +#define indexWord( total, n ) (n) +#define indexWordHi( total ) ((total) - 1) +#define indexWordLo( total ) 0 +#define indexMultiword( total, m, n ) (n) +#define indexMultiwordHi( total, n ) ((total) - (n)) +#define indexMultiwordLo( total, n ) 0 +#define indexMultiwordHiBut( total, n ) (n) +#define indexMultiwordLoBut( total, n ) 0 +#define INIT_UINTM4( v3, v2, v1, v0 ) { v0, v1, v2, v3 } +#else +#define wordIncr -1 +#define indexWord( total, n ) ((total) - 1 - (n)) +#define indexWordHi( total ) 0 +#define indexWordLo( total ) ((total) - 1) +#define indexMultiword( total, m, n ) ((total) - 1 - (m)) +#define indexMultiwordHi( total, n ) 0 +#define indexMultiwordLo( total, n ) ((total) - (n)) +#define indexMultiwordHiBut( total, n ) 0 +#define indexMultiwordLoBut( total, n ) (n) +#define INIT_UINTM4( v3, v2, v1, v0 ) { v3, v2, v1, v0 } +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/primitives.h b/vendor/riscv-isa-sim/softfloat/primitives.h new file mode 100644 index 00000000..1acc8a8a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/primitives.h @@ -0,0 +1,1168 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
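
The word-indexing macros above let the multiword routines address the most and least significant 32-bit words without caring about byte order. The sketch below assumes the little-endian branch of those macros (the DEMO_ names restate that branch for illustration only).

#include <assert.h>
#include <stdint.h>

#define DEMO_indexWord(total, n)  (n)            /* little-endian variant */
#define DEMO_indexWordHi(total)   ((total) - 1)
#define DEMO_indexWordLo(total)   0
#define DEMO_INIT_UINTM4(v3, v2, v1, v0) { v0, v1, v2, v3 }

int main(void)
{
    /* 128-bit value 0x00000003_00000002_00000001_00000000: most-significant word
     * written first in INIT_UINTM4, stored least-significant word first. */
    uint32_t x[4] = DEMO_INIT_UINTM4(3, 2, 1, 0);
    assert(x[DEMO_indexWordLo(4)] == 0);
    assert(x[DEMO_indexWordHi(4)] == 3);
    assert(x[DEMO_indexWord(4, 2)] == 2);
    return 0;
}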
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef primitives_h +#define primitives_h 1 + +#include +#include +#include "primitiveTypes.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef softfloat_shortShiftRightJam64 +/*---------------------------------------------------------------------------- +| Shifts 'a' right by the number of bits given in 'dist', which must be in +| the range 1 to 63. If any nonzero bits are shifted off, they are "jammed" +| into the least-significant bit of the shifted value by setting the least- +| significant bit to 1. This shifted-and-jammed value is returned. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist ) + { return a>>dist | ((a & (((uint_fast64_t) 1<>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0); +} +#else +uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist ); +#endif +#endif + +#ifndef softfloat_shiftRightJam64 +/*---------------------------------------------------------------------------- +| Shifts 'a' right by the number of bits given in 'dist', which must not +| be zero. If any nonzero bits are shifted off, they are "jammed" into the +| least-significant bit of the shifted value by setting the least-significant +| bit to 1. This shifted-and-jammed value is returned. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than 64, the result will be either 0 or 1, depending on whether 'a' +| is zero or nonzero. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL) +INLINE uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist ) +{ + return + (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0); +} +#else +uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist ); +#endif +#endif + +/*---------------------------------------------------------------------------- +| A constant table that translates an 8-bit unsigned integer (the array index) +| into the number of leading 0 bits before the most-significant 1 of that +| integer. For integer zero (index 0), the corresponding table element is 8. +*----------------------------------------------------------------------------*/ +extern const uint_least8_t softfloat_countLeadingZeros8[256]; + +#ifndef softfloat_countLeadingZeros16 +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 16 is returned. 
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ) +{ + uint_fast8_t count = 8; + if ( 0x100 <= a ) { + count = 0; + a >>= 8; + } + count += softfloat_countLeadingZeros8[a]; + return count; +} +#else +uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ); +#endif +#endif + +#ifndef softfloat_countLeadingZeros32 +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 32 is returned. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL) +INLINE uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ) +{ + uint_fast8_t count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[a>>24]; + return count; +} +#else +uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ); +#endif +#endif + +#ifndef softfloat_countLeadingZeros64 +/*---------------------------------------------------------------------------- +| Returns the number of leading 0 bits before the most-significant 1 bit of +| 'a'. If 'a' is zero, 64 is returned. +*----------------------------------------------------------------------------*/ +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ); +#endif + +extern const uint16_t softfloat_approxRecip_1k0s[16]; +extern const uint16_t softfloat_approxRecip_1k1s[16]; + +#ifndef softfloat_approxRecip32_1 +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the number represented by 'a', +| where 'a' is interpreted as an unsigned fixed-point number with one integer +| bit and 31 fraction bits. The 'a' input must be "normalized", meaning that +| its most-significant bit (bit 31) must be 1. Thus, if A is the value of +| the fixed-point interpretation of 'a', then 1 <= A < 2. The returned value +| is interpreted as a pure unsigned fraction, having no integer bits and 32 +| fraction bits. The approximation returned is never greater than the true +| reciprocal 1/A, and it differs from the true reciprocal by at most 2.006 ulp +| (units in the last place). +*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_DIV64TO32 +#define softfloat_approxRecip32_1( a ) ((uint32_t) (UINT64_C( 0x7FFFFFFFFFFFFFFF ) / (uint32_t) (a))) +#else +uint32_t softfloat_approxRecip32_1( uint32_t a ); +#endif +#endif + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[16]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[16]; + +#ifndef softfloat_approxRecipSqrt32_1 +/*---------------------------------------------------------------------------- +| Returns an approximation to the reciprocal of the square root of the number +| represented by 'a', where 'a' is interpreted as an unsigned fixed-point +| number either with one integer bit and 31 fraction bits or with two integer +| bits and 30 fraction bits. The format of 'a' is determined by 'oddExpA', +| which must be either 0 or 1. If 'oddExpA' is 1, 'a' is interpreted as +| having one integer bit, and if 'oddExpA' is 0, 'a' is interpreted as having +| two integer bits. The 'a' input must be "normalized", meaning that its +| most-significant bit (bit 31) must be 1. 
Thus, if A is the value of the +| fixed-point interpretation of 'a', it follows that 1 <= A < 2 when 'oddExpA' +| is 1, and 2 <= A < 4 when 'oddExpA' is 0. +| The returned value is interpreted as a pure unsigned fraction, having +| no integer bits and 32 fraction bits. The approximation returned is never +| greater than the true reciprocal 1/sqrt(A), and it differs from the true +| reciprocal by at most 2.06 ulp (units in the last place). The approximation +| returned is also always within the range 0.5 to 1; thus, the most- +| significant bit of the result is always set. +*----------------------------------------------------------------------------*/ +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ); +#endif + +#ifdef SOFTFLOAT_FAST_INT64 + +/*---------------------------------------------------------------------------- +| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is +| defined. +*----------------------------------------------------------------------------*/ + +#ifndef softfloat_eq128 +/*---------------------------------------------------------------------------- +| Returns true if the 128-bit unsigned integer formed by concatenating 'a64' +| and 'a0' is equal to the 128-bit unsigned integer formed by concatenating +| 'b64' and 'b0'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (1 <= INLINE_LEVEL) +INLINE +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return (a64 == b64) && (a0 == b0); } +#else +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_le128 +/*---------------------------------------------------------------------------- +| Returns true if the 128-bit unsigned integer formed by concatenating 'a64' +| and 'a0' is less than or equal to the 128-bit unsigned integer formed by +| concatenating 'b64' and 'b0'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return (a64 < b64) || ((a64 == b64) && (a0 <= b0)); } +#else +bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_lt128 +/*---------------------------------------------------------------------------- +| Returns true if the 128-bit unsigned integer formed by concatenating 'a64' +| and 'a0' is less than the 128-bit unsigned integer formed by concatenating +| 'b64' and 'b0'. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) + { return (a64 < b64) || ((a64 == b64) && (a0 < b0)); } +#else +bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_shortShiftLeft128 +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' left by the +| number of bits given in 'dist', which must be in the range 1 to 63. 
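
Under SOFTFLOAT_FAST_DIV64TO32, the softfloat_approxRecip32_1 macro defined above is a single 64-by-32 division: 'a' is a fixed-point value with one integer bit (1 <= A < 2) and the result is a pure 32-bit fraction that never exceeds 1/A. A standalone spot check of the two natural endpoints follows; the demo_ name is this sketch's, not the library's.

#include <assert.h>
#include <stdint.h>

static uint32_t demo_approxRecip32_1(uint32_t a)
{
    /* Same form as the SOFTFLOAT_FAST_DIV64TO32 macro above. */
    return (uint32_t)(UINT64_C(0x7FFFFFFFFFFFFFFF) / a);
}

int main(void)
{
    assert(demo_approxRecip32_1(0x80000000u) == 0xFFFFFFFFu);  /* A = 1.0 -> just under 1.0 */
    assert(demo_approxRecip32_1(0xC0000000u) == 0xAAAAAAAAu);  /* A = 1.5 -> about 2/3 */
    return 0;
}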
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +struct uint128 + softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + struct uint128 z; + z.v64 = a64<>(-dist & 63); + z.v0 = a0<>dist; + z.v0 = a64<<(-dist & 63) | a0>>dist; + return z; +} +#else +struct uint128 + softfloat_shortShiftRight128( uint64_t a64, uint64_t a0, uint_fast8_t dist ); +#endif +#endif + +#ifndef softfloat_shortShiftRightJam64Extra +/*---------------------------------------------------------------------------- +| This function is the same as 'softfloat_shiftRightJam64Extra' (below), +| except that 'dist' must be in the range 1 to 63. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +struct uint64_extra + softfloat_shortShiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast8_t dist ) +{ + struct uint64_extra z; + z.v = a>>dist; + z.extra = a<<(-dist & 63) | (extra != 0); + return z; +} +#else +struct uint64_extra + softfloat_shortShiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast8_t dist ); +#endif +#endif + +#ifndef softfloat_shortShiftRightJam128 +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the +| number of bits given in 'dist', which must be in the range 1 to 63. If any +| nonzero bits are shifted off, they are "jammed" into the least-significant +| bit of the shifted value by setting the least-significant bit to 1. This +| shifted-and-jammed value is returned. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL) +INLINE +struct uint128 + softfloat_shortShiftRightJam128( + uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + uint_fast8_t negDist = -dist; + struct uint128 z; + z.v64 = a64>>dist; + z.v0 = + a64<<(negDist & 63) | a0>>dist + | ((uint64_t) (a0<<(negDist & 63)) != 0); + return z; +} +#else +struct uint128 + softfloat_shortShiftRightJam128( + uint64_t a64, uint64_t a0, uint_fast8_t dist ); +#endif +#endif + +#ifndef softfloat_shortShiftRightJam128Extra +/*---------------------------------------------------------------------------- +| This function is the same as 'softfloat_shiftRightJam128Extra' (below), +| except that 'dist' must be in the range 1 to 63. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL) +INLINE +struct uint128_extra + softfloat_shortShiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast8_t dist ) +{ + uint_fast8_t negDist = -dist; + struct uint128_extra z; + z.v.v64 = a64>>dist; + z.v.v0 = a64<<(negDist & 63) | a0>>dist; + z.extra = a0<<(negDist & 63) | (extra != 0); + return z; +} +#else +struct uint128_extra + softfloat_shortShiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast8_t dist ); +#endif +#endif + +#ifndef softfloat_shiftRightJam64Extra +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a' and 'extra' right by 64 +| _plus_ the number of bits given in 'dist', which must not be zero. This +| shifted value is at most 64 nonzero bits and is returned in the 'v' field +| of the 'struct uint64_extra' result. 
The 64-bit 'extra' field of the result +| contains a value formed as follows from the bits that were shifted off: The +| _last_ bit shifted off is the most-significant bit of the 'extra' field, and +| the other 63 bits of the 'extra' field are all zero if and only if _all_but_ +| _the_last_ bits shifted off were all zero. +| (This function makes more sense if 'a' and 'extra' are considered to form +| an unsigned fixed-point number with binary point between 'a' and 'extra'. +| This fixed-point value is shifted right by the number of bits given in +| 'dist', and the integer part of this shifted value is returned in the 'v' +| field of the result. The fractional part of the shifted value is modified +| as described above and returned in the 'extra' field of the result.) +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (4 <= INLINE_LEVEL) +INLINE +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ) +{ + struct uint64_extra z; + if ( dist < 64 ) { + z.v = a>>dist; + z.extra = a<<(-dist & 63); + } else { + z.v = 0; + z.extra = (dist == 64) ? a : (a != 0); + } + z.extra |= (extra != 0); + return z; +} +#else +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ); +#endif +#endif + +#ifndef softfloat_shiftRightJam128 +/*---------------------------------------------------------------------------- +| Shifts the 128 bits formed by concatenating 'a64' and 'a0' right by the +| number of bits given in 'dist', which must not be zero. If any nonzero bits +| are shifted off, they are "jammed" into the least-significant bit of the +| shifted value by setting the least-significant bit to 1. This shifted-and- +| jammed value is returned. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than 128, the result will be either 0 or 1, depending on whether the +| original 128 bits are all zeros. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ); +#endif + +#ifndef softfloat_shiftRightJam128Extra +/*---------------------------------------------------------------------------- +| Shifts the 192 bits formed by concatenating 'a64', 'a0', and 'extra' right +| by 64 _plus_ the number of bits given in 'dist', which must not be zero. +| This shifted value is at most 128 nonzero bits and is returned in the 'v' +| field of the 'struct uint128_extra' result. The 64-bit 'extra' field of the +| result contains a value formed as follows from the bits that were shifted +| off: The _last_ bit shifted off is the most-significant bit of the 'extra' +| field, and the other 63 bits of the 'extra' field are all zero if and only +| if _all_but_the_last_ bits shifted off were all zero. +| (This function makes more sense if 'a64', 'a0', and 'extra' are considered +| to form an unsigned fixed-point number with binary point between 'a0' and +| 'extra'. This fixed-point value is shifted right by the number of bits +| given in 'dist', and the integer part of this shifted value is returned +| in the 'v' field of the result. The fractional part of the shifted value +| is modified as described above and returned in the 'extra' field of the +| result.) 
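
The "jam" in the shift helpers described above folds every discarded bit into the least significant bit of the result, so later rounding can still distinguish an exact value from an inexact one. The following restates the inline softfloat_shiftRightJam64 form shown earlier as a standalone check; the demo_ name is this sketch's.

#include <assert.h>
#include <stdint.h>

static uint64_t demo_shiftRightJam64(uint64_t a, unsigned dist)
{
    /* 'dist' must not be zero, as required by the description above. */
    return (dist < 63) ? a >> dist | ((uint64_t)(a << (-dist & 63)) != 0)
                       : (a != 0);
}

int main(void)
{
    assert(demo_shiftRightJam64(0x20, 4) == 0x2);   /* exact shift: nothing jammed */
    assert(demo_shiftRightJam64(0x21, 4) == 0x3);   /* the lost 1 is jammed into bit 0 */
    assert(demo_shiftRightJam64(0x21, 80) == 0x1);  /* dist > 64: collapses to zero/nonzero */
    return 0;
}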
+*----------------------------------------------------------------------------*/ +struct uint128_extra + softfloat_shiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast32_t dist ); +#endif + +#ifndef softfloat_shiftRightJam256M +/*---------------------------------------------------------------------------- +| Shifts the 256-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', which must not be zero. If any nonzero bits are +| shifted off, they are "jammed" into the least-significant bit of the shifted +| value by setting the least-significant bit to 1. This shifted-and-jammed +| value is stored at the location pointed to by 'zPtr'. Each of 'aPtr' and +| 'zPtr' points to an array of four 64-bit elements that concatenate in the +| platform's normal endian order to form a 256-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' +| is greater than 256, the stored result will be either 0 or 1, depending on +| whether the original 256 bits are all zeros. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftRightJam256M( + const uint64_t *aPtr, uint_fast32_t dist, uint64_t *zPtr ); +#endif + +#ifndef softfloat_add128 +/*---------------------------------------------------------------------------- +| Returns the sum of the 128-bit integer formed by concatenating 'a64' and +| 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. The +| addition is modulo 2^128, so any carry out is lost. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 + b0; + z.v64 = a64 + b64 + (z.v0 < a0); + return z; +} +#else +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_add256M +/*---------------------------------------------------------------------------- +| Adds the two 256-bit integers pointed to by 'aPtr' and 'bPtr'. The addition +| is modulo 2^256, so any carry out is lost. The sum is stored at the +| location pointed to by 'zPtr'. Each of 'aPtr', 'bPtr', and 'zPtr' points to +| an array of four 64-bit elements that concatenate in the platform's normal +| endian order to form a 256-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_add256M( + const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr ); +#endif + +#ifndef softfloat_sub128 +/*---------------------------------------------------------------------------- +| Returns the difference of the 128-bit integer formed by concatenating 'a64' +| and 'a0' and the 128-bit integer formed by concatenating 'b64' and 'b0'. +| The subtraction is modulo 2^128, so any borrow out (carry out) is lost. 
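
The 128-bit add above propagates the carry between halves with a single comparison: the low words are summed, and (z.v0 < a0) detects wrap-around. A standalone restatement with one overflowing case follows; the demo_ names are this sketch's, with the struct fields in the little-endian order used above.

#include <assert.h>
#include <stdint.h>

struct demo_uint128 { uint64_t v0, v64; };

static struct demo_uint128 demo_add128(uint64_t a64, uint64_t a0,
                                       uint64_t b64, uint64_t b0)
{
    struct demo_uint128 z;
    z.v0  = a0 + b0;
    z.v64 = a64 + b64 + (z.v0 < a0);   /* carry out of the low word */
    return z;
}

int main(void)
{
    /* 0x1_FFFFFFFFFFFFFFFF + 1 = 0x2_0000000000000000 */
    struct demo_uint128 z = demo_add128(0x1, UINT64_MAX, 0x0, 1);
    assert(z.v64 == 0x2 && z.v0 == 0);
    return 0;
}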
+*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + z.v0 = a0 - b0; + z.v64 = a64 - b64; + z.v64 -= (a0 < b0); + return z; +} +#else +struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ); +#endif +#endif + +#ifndef softfloat_sub256M +/*---------------------------------------------------------------------------- +| Subtracts the 256-bit integer pointed to by 'bPtr' from the 256-bit integer +| pointed to by 'aPtr'. The addition is modulo 2^256, so any borrow out +| (carry out) is lost. The difference is stored at the location pointed to +| by 'zPtr'. Each of 'aPtr', 'bPtr', and 'zPtr' points to an array of four +| 64-bit elements that concatenate in the platform's normal endian order to +| form a 256-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_sub256M( + const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr ); +#endif + +#ifndef softfloat_mul64ByShifted32To128 +/*---------------------------------------------------------------------------- +| Returns the 128-bit product of 'a', 'b', and 2^32. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (3 <= INLINE_LEVEL) +INLINE struct uint128 softfloat_mul64ByShifted32To128( uint64_t a, uint32_t b ) +{ + uint_fast64_t mid; + struct uint128 z; + mid = (uint_fast64_t) (uint32_t) a * b; + z.v0 = mid<<32; + z.v64 = (uint_fast64_t) (uint32_t) (a>>32) * b + (mid>>32); + return z; +} +#else +struct uint128 softfloat_mul64ByShifted32To128( uint64_t a, uint32_t b ); +#endif +#endif + +#ifndef softfloat_mul64To128 +/*---------------------------------------------------------------------------- +| Returns the 128-bit product of 'a' and 'b'. +*----------------------------------------------------------------------------*/ +struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ); +#endif + +#ifndef softfloat_mul128By32 +/*---------------------------------------------------------------------------- +| Returns the product of the 128-bit integer formed by concatenating 'a64' and +| 'a0', multiplied by 'b'. The multiplication is modulo 2^128; any overflow +| bits are discarded. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (4 <= INLINE_LEVEL) +INLINE +struct uint128 softfloat_mul128By32( uint64_t a64, uint64_t a0, uint32_t b ) +{ + struct uint128 z; + uint_fast64_t mid; + uint_fast32_t carry; + z.v0 = a0 * b; + mid = (uint_fast64_t) (uint32_t) (a0>>32) * b; + carry = (uint32_t) ((uint_fast32_t) (z.v0>>32) - (uint_fast32_t) mid); + z.v64 = a64 * b + (uint_fast32_t) ((mid + carry)>>32); + return z; +} +#else +struct uint128 softfloat_mul128By32( uint64_t a64, uint64_t a0, uint32_t b ); +#endif +#endif + +#ifndef softfloat_mul128To256M +/*---------------------------------------------------------------------------- +| Multiplies the 128-bit unsigned integer formed by concatenating 'a64' and +| 'a0' by the 128-bit unsigned integer formed by concatenating 'b64' and +| 'b0'. The 256-bit product is stored at the location pointed to by 'zPtr'. +| Argument 'zPtr' points to an array of four 64-bit elements that concatenate +| in the platform's normal endian order to form a 256-bit integer. 
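
softfloat_mul64To128 is only declared above; one conventional way to realize it is with four 32x32-bit partial products, in the same spirit as softfloat_mul64ByShifted32To128 and softfloat_mul128By32. The following is a sketch under that assumption, not the library's implementation; the demo_ names are this sketch's.

#include <assert.h>
#include <stdint.h>

struct demo_uint128 { uint64_t v0, v64; };

static struct demo_uint128 demo_mul64To128(uint64_t a, uint64_t b)
{
    uint64_t aLo = (uint32_t)a, aHi = a >> 32;
    uint64_t bLo = (uint32_t)b, bHi = b >> 32;
    uint64_t lo   = aLo * bLo;
    uint64_t mid1 = aHi * bLo, mid2 = aLo * bHi;
    struct demo_uint128 z;
    z.v0 = lo + (mid1 << 32);
    uint64_t carry = (z.v0 < lo);          /* carry from the first cross term */
    uint64_t v0b = z.v0 + (mid2 << 32);
    carry += (v0b < z.v0);                 /* carry from the second cross term */
    z.v0 = v0b;
    z.v64 = aHi * bHi + (mid1 >> 32) + (mid2 >> 32) + carry;
    return z;
}

int main(void)
{
    /* (2^64 - 1)^2 = 2^128 - 2^65 + 1 */
    struct demo_uint128 z = demo_mul64To128(UINT64_MAX, UINT64_MAX);
    assert(z.v64 == UINT64_C(0xFFFFFFFFFFFFFFFE) && z.v0 == 1);
    return 0;
}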
+*----------------------------------------------------------------------------*/ +void + softfloat_mul128To256M( + uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0, uint64_t *zPtr ); +#endif + +#else + +/*---------------------------------------------------------------------------- +| The following functions are needed only when 'SOFTFLOAT_FAST_INT64' is not +| defined. +*----------------------------------------------------------------------------*/ + +#ifndef softfloat_compare96M +/*---------------------------------------------------------------------------- +| Compares the two 96-bit unsigned integers pointed to by 'aPtr' and 'bPtr'. +| Returns -1 if the first integer (A) is less than the second (B); returns 0 +| if the two integers are equal; and returns +1 if the first integer (A) +| is greater than the second (B). (The result is thus the signum of A - B.) +| Each of 'aPtr' and 'bPtr' points to an array of three 32-bit elements that +| concatenate in the platform's normal endian order to form a 96-bit integer. +*----------------------------------------------------------------------------*/ +int_fast8_t softfloat_compare96M( const uint32_t *aPtr, const uint32_t *bPtr ); +#endif + +#ifndef softfloat_compare128M +/*---------------------------------------------------------------------------- +| Compares the two 128-bit unsigned integers pointed to by 'aPtr' and 'bPtr'. +| Returns -1 if the first integer (A) is less than the second (B); returns 0 +| if the two integers are equal; and returns +1 if the first integer (A) +| is greater than the second (B). (The result is thus the signum of A - B.) +| Each of 'aPtr' and 'bPtr' points to an array of four 32-bit elements that +| concatenate in the platform's normal endian order to form a 128-bit integer. +*----------------------------------------------------------------------------*/ +int_fast8_t + softfloat_compare128M( const uint32_t *aPtr, const uint32_t *bPtr ); +#endif + +#ifndef softfloat_shortShiftLeft64To96M +/*---------------------------------------------------------------------------- +| Extends 'a' to 96 bits and shifts the value left by the number of bits given +| in 'dist', which must be in the range 1 to 31. The result is stored at the +| location pointed to by 'zPtr'. Argument 'zPtr' points to an array of three +| 32-bit elements that concatenate in the platform's normal endian order to +| form a 96-bit integer. +*----------------------------------------------------------------------------*/ +#if defined INLINE_LEVEL && (2 <= INLINE_LEVEL) +INLINE +void + softfloat_shortShiftLeft64To96M( + uint64_t a, uint_fast8_t dist, uint32_t *zPtr ) +{ + zPtr[indexWord( 3, 0 )] = (uint32_t) a<>= 32 - dist; + zPtr[indexWord( 3, 2 )] = a>>32; + zPtr[indexWord( 3, 1 )] = a; +} +#else +void + softfloat_shortShiftLeft64To96M( + uint64_t a, uint_fast8_t dist, uint32_t *zPtr ); +#endif +#endif + +#ifndef softfloat_shortShiftLeftM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' left by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must be in the range 1 to 31. Any nonzero bits shifted off are lost. The +| shifted N-bit result is stored at the location pointed to by 'zPtr'. Each +| of 'aPtr' and 'zPtr' points to a 'size_words'-long array of 32-bit elements +| that concatenate in the platform's normal endian order to form an N-bit +| integer. 
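
softfloat_shortShiftLeft64To96M above writes its 96-bit result as three 32-bit words addressed through indexWord. A standalone restatement assuming the little-endian word order, checked on one value; the DEMO_/demo_ names are this sketch's.

#include <assert.h>
#include <stdint.h>

#define DEMO_indexWord(total, n) (n)   /* little-endian word order assumed */

static void demo_shortShiftLeft64To96M(uint64_t a, unsigned dist, uint32_t *zPtr)
{
    /* 'dist' must be in the range 1 to 31, as the description above states. */
    zPtr[DEMO_indexWord(3, 0)] = (uint32_t)a << dist;
    a >>= 32 - dist;
    zPtr[DEMO_indexWord(3, 2)] = (uint32_t)(a >> 32);
    zPtr[DEMO_indexWord(3, 1)] = (uint32_t)a;
}

int main(void)
{
    uint32_t z[3];
    /* 0x0123456789ABCDEF shifted left by 8 -> 0x01_23456789_ABCDEF00 (96 bits) */
    demo_shortShiftLeft64To96M(UINT64_C(0x0123456789ABCDEF), 8, z);
    assert(z[2] == 0x01 && z[1] == 0x23456789 && z[0] == 0xABCDEF00);
    return 0;
}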
+*----------------------------------------------------------------------------*/ +void + softfloat_shortShiftLeftM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shortShiftLeft96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftLeftM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftLeft96M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftLeft128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftLeftM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftLeft128M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftLeft160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftLeftM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftLeft160M( aPtr, dist, zPtr ) softfloat_shortShiftLeftM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftLeftM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' left by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must not be zero. Any nonzero bits shifted off are lost. The shifted +| N-bit result is stored at the location pointed to by 'zPtr'. Each of 'aPtr' +| and 'zPtr' points to a 'size_words'-long array of 32-bit elements that +| concatenate in the platform's normal endian order to form an N-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than N, the stored result will be 0. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftLeftM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint32_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shiftLeft96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftLeftM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftLeft96M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftLeft128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftLeftM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftLeft128M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftLeft160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftLeftM' with +| 'size_words' = 5 (N = 160). 
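A minimal standalone sketch of the word-by-word left shift that the 'M'-suffixed helpers above describe, assuming little-endian word order (index 0 holds the least-significant word); the real routines use the indexWord macros so the same logic works for either endianness, and the helper name short_shift_left_sketch is invented here purely for illustration:

#include <assert.h>
#include <stdint.h>

/* Shift an N-word unsigned integer left by 1..31 bits, dropping any bits
   shifted off the top, as documented for softfloat_shortShiftLeftM. */
static void short_shift_left_sketch(int size_words, const uint32_t *a,
                                    int dist, uint32_t *z)
{
    uint32_t carry = 0;
    for (int i = 0; i < size_words; ++i) {   /* least-significant word first */
        uint32_t w = a[i];
        z[i] = (w << dist) | carry;
        carry = w >> (32 - dist);            /* bits promoted to the next word */
    }
}

int main(void)
{
    uint32_t a[3] = { 0x80000001, 0, 0 }, z[3];
    short_shift_left_sketch(3, a, 4, z);
    assert(z[0] == 0x00000010 && z[1] == 0x00000008 && z[2] == 0);
    return 0;
}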
+*----------------------------------------------------------------------------*/ +#define softfloat_shiftLeft160M( aPtr, dist, zPtr ) softfloat_shiftLeftM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftRightM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must be in the range 1 to 31. Any nonzero bits shifted off are lost. The +| shifted N-bit result is stored at the location pointed to by 'zPtr'. Each +| of 'aPtr' and 'zPtr' points to a 'size_words'-long array of 32-bit elements +| that concatenate in the platform's normal endian order to form an N-bit +| integer. +*----------------------------------------------------------------------------*/ +void + softfloat_shortShiftRightM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shortShiftRight128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftRightM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftRight128M( aPtr, dist, zPtr ) softfloat_shortShiftRightM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftRight160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftRightM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftRight160M( aPtr, dist, zPtr ) softfloat_shortShiftRightM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shortShiftRightJamM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must be in the range 1 to 31. If any nonzero bits are shifted off, they are +| "jammed" into the least-significant bit of the shifted value by setting the +| least-significant bit to 1. This shifted-and-jammed N-bit result is stored +| at the location pointed to by 'zPtr'. Each of 'aPtr' and 'zPtr' points +| to a 'size_words'-long array of 32-bit elements that concatenate in the +| platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_shortShiftRightJamM( + uint_fast8_t, const uint32_t *, uint_fast8_t, uint32_t * ); +#endif + +#ifndef softfloat_shortShiftRightJam160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shortShiftRightJamM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shortShiftRightJam160M( aPtr, dist, zPtr ) softfloat_shortShiftRightJamM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must not be zero. Any nonzero bits shifted off are lost. 
The shifted +| N-bit result is stored at the location pointed to by 'zPtr'. Each of 'aPtr' +| and 'zPtr' points to a 'size_words'-long array of 32-bit elements that +| concatenate in the platform's normal endian order to form an N-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' is +| greater than N, the stored result will be 0. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftRightM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint32_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shiftRight96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRight96M( aPtr, dist, zPtr ) softfloat_shiftRightM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightJamM +/*---------------------------------------------------------------------------- +| Shifts the N-bit unsigned integer pointed to by 'aPtr' right by the number +| of bits given in 'dist', where N = 'size_words' * 32. The value of 'dist' +| must not be zero. If any nonzero bits are shifted off, they are "jammed" +| into the least-significant bit of the shifted value by setting the least- +| significant bit to 1. This shifted-and-jammed N-bit result is stored +| at the location pointed to by 'zPtr'. Each of 'aPtr' and 'zPtr' points +| to a 'size_words'-long array of 32-bit elements that concatenate in the +| platform's normal endian order to form an N-bit integer. +| The value of 'dist' can be arbitrarily large. In particular, if 'dist' +| is greater than N, the stored result will be either 0 or 1, depending on +| whether the original N bits are all zeros. +*----------------------------------------------------------------------------*/ +void + softfloat_shiftRightJamM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint32_t dist, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_shiftRightJam96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightJamM' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRightJam96M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 3, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightJam128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightJamM' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRightJam128M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 4, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_shiftRightJam160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_shiftRightJamM' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_shiftRightJam160M( aPtr, dist, zPtr ) softfloat_shiftRightJamM( 5, aPtr, dist, zPtr ) +#endif + +#ifndef softfloat_addM +/*---------------------------------------------------------------------------- +| Adds the two N-bit integers pointed to by 'aPtr' and 'bPtr', where N = +| 'size_words' * 32. 
The addition is modulo 2^N, so any carry out is lost. +| The N-bit sum is stored at the location pointed to by 'zPtr'. Each of +| 'aPtr', 'bPtr', and 'zPtr' points to a 'size_words'-long array of 32-bit +| elements that concatenate in the platform's normal endian order to form an +| N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_addM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_add96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_addM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_add96M( aPtr, bPtr, zPtr ) softfloat_addM( 3, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_add128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_addM' with 'size_words' +| = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_add128M( aPtr, bPtr, zPtr ) softfloat_addM( 4, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_add160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_addM' with 'size_words' +| = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_add160M( aPtr, bPtr, zPtr ) softfloat_addM( 5, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_addCarryM +/*---------------------------------------------------------------------------- +| Adds the two N-bit unsigned integers pointed to by 'aPtr' and 'bPtr', where +| N = 'size_words' * 32, plus 'carry', which must be either 0 or 1. The N-bit +| sum (modulo 2^N) is stored at the location pointed to by 'zPtr', and any +| carry out is returned as the result. Each of 'aPtr', 'bPtr', and 'zPtr' +| points to a 'size_words'-long array of 32-bit elements that concatenate in +| the platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +uint_fast8_t + softfloat_addCarryM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint_fast8_t carry, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_addComplCarryM +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_addCarryM', except that +| the value of the unsigned integer pointed to by 'bPtr' is bit-wise completed +| before the addition. +*----------------------------------------------------------------------------*/ +uint_fast8_t + softfloat_addComplCarryM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint_fast8_t carry, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_addComplCarry96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_addComplCarryM' with +| 'size_words' = 3 (N = 96). 
+*----------------------------------------------------------------------------*/ +#define softfloat_addComplCarry96M( aPtr, bPtr, carry, zPtr ) softfloat_addComplCarryM( 3, aPtr, bPtr, carry, zPtr ) +#endif + +#ifndef softfloat_negXM +/*---------------------------------------------------------------------------- +| Replaces the N-bit unsigned integer pointed to by 'zPtr' by the +| 2s-complement of itself, where N = 'size_words' * 32. Argument 'zPtr' +| points to a 'size_words'-long array of 32-bit elements that concatenate in +| the platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void softfloat_negXM( uint_fast8_t size_words, uint32_t *zPtr ); +#endif + +#ifndef softfloat_negX96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_negX96M( zPtr ) softfloat_negXM( 3, zPtr ) +#endif + +#ifndef softfloat_negX128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_negX128M( zPtr ) softfloat_negXM( 4, zPtr ) +#endif + +#ifndef softfloat_negX160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_negX160M( zPtr ) softfloat_negXM( 5, zPtr ) +#endif + +#ifndef softfloat_negX256M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_negXM' with 'size_words' +| = 8 (N = 256). +*----------------------------------------------------------------------------*/ +#define softfloat_negX256M( zPtr ) softfloat_negXM( 8, zPtr ) +#endif + +#ifndef softfloat_sub1XM +/*---------------------------------------------------------------------------- +| Subtracts 1 from the N-bit integer pointed to by 'zPtr', where N = +| 'size_words' * 32. The subtraction is modulo 2^N, so any borrow out (carry +| out) is lost. Argument 'zPtr' points to a 'size_words'-long array of 32-bit +| elements that concatenate in the platform's normal endian order to form an +| N-bit integer. +*----------------------------------------------------------------------------*/ +void softfloat_sub1XM( uint_fast8_t size_words, uint32_t *zPtr ); +#endif + +#ifndef softfloat_sub1X96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_sub1XM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_sub1X96M( zPtr ) softfloat_sub1XM( 3, zPtr ) +#endif + +#ifndef softfloat_sub1X160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_sub1XM' with 'size_words' +| = 5 (N = 160). 
+*----------------------------------------------------------------------------*/ +#define softfloat_sub1X160M( zPtr ) softfloat_sub1XM( 5, zPtr ) +#endif + +#ifndef softfloat_subM +/*---------------------------------------------------------------------------- +| Subtracts the two N-bit integers pointed to by 'aPtr' and 'bPtr', where N = +| 'size_words' * 32. The subtraction is modulo 2^N, so any borrow out (carry +| out) is lost. The N-bit difference is stored at the location pointed to by +| 'zPtr'. Each of 'aPtr', 'bPtr', and 'zPtr' points to a 'size_words'-long +| array of 32-bit elements that concatenate in the platform's normal endian +| order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_subM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_sub96M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_subM' with 'size_words' +| = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_sub96M( aPtr, bPtr, zPtr ) softfloat_subM( 3, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_sub128M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_subM' with 'size_words' +| = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_sub128M( aPtr, bPtr, zPtr ) softfloat_subM( 4, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_sub160M +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_subM' with 'size_words' +| = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_sub160M( aPtr, bPtr, zPtr ) softfloat_subM( 5, aPtr, bPtr, zPtr ) +#endif + +#ifndef softfloat_mul64To128M +/*---------------------------------------------------------------------------- +| Multiplies 'a' and 'b' and stores the 128-bit product at the location +| pointed to by 'zPtr'. Argument 'zPtr' points to an array of four 32-bit +| elements that concatenate in the platform's normal endian order to form a +| 128-bit integer. +*----------------------------------------------------------------------------*/ +void softfloat_mul64To128M( uint64_t a, uint64_t b, uint32_t *zPtr ); +#endif + +#ifndef softfloat_mul128MTo256M +/*---------------------------------------------------------------------------- +| Multiplies the two 128-bit unsigned integers pointed to by 'aPtr' and +| 'bPtr', and stores the 256-bit product at the location pointed to by 'zPtr'. +| Each of 'aPtr' and 'bPtr' points to an array of four 32-bit elements that +| concatenate in the platform's normal endian order to form a 128-bit integer. +| Argument 'zPtr' points to an array of eight 32-bit elements that concatenate +| to form a 256-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_mul128MTo256M( + const uint32_t *aPtr, const uint32_t *bPtr, uint32_t *zPtr ); +#endif + +#ifndef softfloat_remStepMBy32 +/*---------------------------------------------------------------------------- +| Performs a "remainder reduction step" as follows: Arguments 'remPtr' and +| 'bPtr' both point to N-bit unsigned integers, where N = 'size_words' * 32. 
+| Defining R and B as the values of those integers, the expression (R<<'dist') +| - B * q is computed modulo 2^N, and the N-bit result is stored at the +| location pointed to by 'zPtr'. Each of 'remPtr', 'bPtr', and 'zPtr' points +| to a 'size_words'-long array of 32-bit elements that concatenate in the +| platform's normal endian order to form an N-bit integer. +*----------------------------------------------------------------------------*/ +void + softfloat_remStepMBy32( + uint_fast8_t size_words, + const uint32_t *remPtr, + uint_fast8_t dist, + const uint32_t *bPtr, + uint32_t q, + uint32_t *zPtr + ); +#endif + +#ifndef softfloat_remStep96MBy32 +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_remStepMBy32' with +| 'size_words' = 3 (N = 96). +*----------------------------------------------------------------------------*/ +#define softfloat_remStep96MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 3, remPtr, dist, bPtr, q, zPtr ) +#endif + +#ifndef softfloat_remStep128MBy32 +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_remStepMBy32' with +| 'size_words' = 4 (N = 128). +*----------------------------------------------------------------------------*/ +#define softfloat_remStep128MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 4, remPtr, dist, bPtr, q, zPtr ) +#endif + +#ifndef softfloat_remStep160MBy32 +/*---------------------------------------------------------------------------- +| This function or macro is the same as 'softfloat_remStepMBy32' with +| 'size_words' = 5 (N = 160). +*----------------------------------------------------------------------------*/ +#define softfloat_remStep160MBy32( remPtr, dist, bPtr, q, zPtr ) softfloat_remStepMBy32( 5, remPtr, dist, bPtr, q, zPtr ) +#endif + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_add128.c b/vendor/riscv-isa-sim/softfloat/s_add128.c new file mode 100644 index 00000000..8065656a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_add128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_add128 + +struct uint128 + softfloat_add128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + + z.v0 = a0 + b0; + z.v64 = a64 + b64 + (z.v0 < a0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_add256M.c b/vendor/riscv-isa-sim/softfloat/s_add256M.c new file mode 100644 index 00000000..d07b0046 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_add256M.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
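An aside on the carry trick in softfloat_add128 above: with wrap-around unsigned arithmetic, the low-word sum is smaller than an addend exactly when the addition overflowed, so (z.v0 < a0) is the carry into the high word. A small standalone check of that property (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a0 = UINT64_MAX, b0 = 1;
    uint64_t v0 = a0 + b0;            /* wraps to 0 */
    assert((v0 < a0) == 1);           /* carry propagates into the high word */

    a0 = 5; b0 = 7;
    v0 = a0 + b0;
    assert((v0 < a0) == 0);           /* no carry */
    return 0;
}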
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_add256M + +void + softfloat_add256M( + const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr ) +{ + unsigned int index; + uint_fast8_t carry; + uint64_t wordA, wordZ; + + index = indexWordLo( 4 ); + carry = 0; + for (;;) { + wordA = aPtr[index]; + wordZ = wordA + bPtr[index] + carry; + zPtr[index] = wordZ; + if ( index == indexWordHi( 4 ) ) break; + if ( wordZ != wordA ) carry = (wordZ < wordA); + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_addCarryM.c b/vendor/riscv-isa-sim/softfloat/s_addCarryM.c new file mode 100644 index 00000000..fae1db49 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addCarryM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
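The carry update in softfloat_add256M above is deliberately conditional: when wordZ equals wordA, the addend plus the incoming carry summed to 0 modulo 2^64, which happens only when both are zero (the carry stays 0) or when the addend is all ones and the incoming carry is 1 (the carry stays 1), so leaving 'carry' untouched is correct. A quick standalone comparison of that idiom against an exact carry computation; the helper names are made up for this sketch:

#include <assert.h>
#include <stdint.h>

static unsigned carry_idiom(uint64_t a, uint64_t b, unsigned cin)
{
    uint64_t z = a + b + cin;
    unsigned carry = cin;
    if (z != a) carry = (z < a);      /* the softfloat_add256M update rule */
    return carry;
}

static unsigned carry_exact(uint64_t a, uint64_t b, unsigned cin)
{
    uint64_t s1 = a + b;
    unsigned c1 = (s1 < a);
    uint64_t s2 = s1 + cin;
    unsigned c2 = (s2 < s1);
    return c1 | c2;                   /* at most one of the two can carry */
}

int main(void)
{
    const uint64_t samples[] = { 0, 1, 5, UINT64_MAX - 1, UINT64_MAX };
    for (unsigned i = 0; i < 5; ++i)
        for (unsigned j = 0; j < 5; ++j)
            for (unsigned cin = 0; cin <= 1; ++cin)
                assert(carry_idiom(samples[i], samples[j], cin)
                       == carry_exact(samples[i], samples[j], cin));
    return 0;
}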
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_addCarryM + +uint_fast8_t + softfloat_addCarryM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint_fast8_t carry, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint32_t wordA, wordZ; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + for (;;) { + wordA = aPtr[index]; + wordZ = wordA + bPtr[index] + carry; + zPtr[index] = wordZ; + if ( wordZ != wordA ) carry = (wordZ < wordA); + if ( index == lastIndex ) break; + index += wordIncr; + } + return carry; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c b/vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c new file mode 100644 index 00000000..02f2bce4 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addComplCarryM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_addComplCarryM + +uint_fast8_t + softfloat_addComplCarryM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint_fast8_t carry, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint32_t wordA, wordZ; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + for (;;) { + wordA = aPtr[index]; + wordZ = wordA + ~bPtr[index] + carry; + zPtr[index] = wordZ; + if ( wordZ != wordA ) carry = (wordZ < wordA); + if ( index == lastIndex ) break; + index += wordIncr; + } + return carry; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_addM.c b/vendor/riscv-isa-sim/softfloat/s_addM.c new file mode 100644 index 00000000..a06eda65 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
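softfloat_addComplCarryM above is how the package spells multi-word subtraction: in two's-complement arithmetic A - B equals A + ~B + 1, with the trailing "+ 1" supplied through the incoming carry of the lowest word. A one-word illustration of the identity (standalone sketch, not SoftFloat code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t a = 1000, b = 123456;
    /* A - B == A + ~B + 1, all arithmetic modulo 2^64 */
    assert((uint64_t)(a + ~b + 1) == a - b);
    assert((uint64_t)(b + ~a + 1) == b - a);
    return 0;
}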
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_addM + +void + softfloat_addM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint_fast8_t carry; + uint32_t wordA, wordZ; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + carry = 0; + for (;;) { + wordA = aPtr[index]; + wordZ = wordA + bPtr[index] + carry; + zPtr[index] = wordZ; + if ( index == lastIndex ) break; + if ( wordZ != wordA ) carry = (wordZ < wordA); + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF128.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF128.c new file mode 100644 index 00000000..292f0aa5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF128.c @@ -0,0 +1,154 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" + +float128_t + softfloat_addMagsF128( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0, + bool signZ + ) +{ + int_fast32_t expA; + struct uint128 sigA; + int_fast32_t expB; + struct uint128 sigB; + int_fast32_t expDiff; + struct uint128 uiZ, sigZ; + int_fast32_t expZ; + uint_fast64_t sigZExtra; + struct uint128_extra sig128Extra; + union ui128_f128 uZ; + + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + expDiff = expA - expB; + if ( ! 
expDiff ) { + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + goto uiZ; + } + sigZ = softfloat_add128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ); + if ( ! expA ) { + uiZ.v64 = packToF128UI64( signZ, 0, sigZ.v64 ); + uiZ.v0 = sigZ.v0; + goto uiZ; + } + expZ = expA; + sigZ.v64 |= UINT64_C( 0x0002000000000000 ); + sigZExtra = 0; + goto shiftRight1; + } + if ( expDiff < 0 ) { + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + expZ = expB; + if ( expA ) { + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + } else { + ++expDiff; + sigZExtra = 0; + if ( ! expDiff ) goto newlyAligned; + } + sig128Extra = + softfloat_shiftRightJam128Extra( sigA.v64, sigA.v0, 0, -expDiff ); + sigA = sig128Extra.v; + sigZExtra = sig128Extra.extra; + } else { + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) goto propagateNaN; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + goto uiZ; + } + expZ = expA; + if ( expB ) { + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + } else { + --expDiff; + sigZExtra = 0; + if ( ! expDiff ) goto newlyAligned; + } + sig128Extra = + softfloat_shiftRightJam128Extra( sigB.v64, sigB.v0, 0, expDiff ); + sigB = sig128Extra.v; + sigZExtra = sig128Extra.extra; + } + newlyAligned: + sigZ = + softfloat_add128( + sigA.v64 | UINT64_C( 0x0001000000000000 ), + sigA.v0, + sigB.v64, + sigB.v0 + ); + --expZ; + if ( sigZ.v64 < UINT64_C( 0x0002000000000000 ) ) goto roundAndPack; + ++expZ; + shiftRight1: + sig128Extra = + softfloat_shortShiftRightJam128Extra( + sigZ.v64, sigZ.v0, sigZExtra, 1 ); + sigZ = sig128Extra.v; + sigZExtra = sig128Extra.extra; + roundAndPack: + return + softfloat_roundPackToF128( signZ, expZ, sigZ.v64, sigZ.v0, sigZExtra ); + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF16.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF16.c new file mode 100644 index 00000000..4204c1e0 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF16.c @@ -0,0 +1,183 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include <stdbool.h> +#include <stdint.h> +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t softfloat_addMagsF16( uint_fast16_t uiA, uint_fast16_t uiB ) +{ + int_fast8_t expA; + uint_fast16_t sigA; + int_fast8_t expB; + uint_fast16_t sigB; + int_fast8_t expDiff; + uint_fast16_t uiZ; + bool signZ; + int_fast8_t expZ; + uint_fast16_t sigZ; + uint_fast16_t sigX, sigY; + int_fast8_t shiftDist; + uint_fast32_t sig32Z; + int_fast8_t roundingMode; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0x1F ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + signZ = signF16UI( uiA ); + expZ = expA; + sigZ = 0x0800 + sigA + sigB; + if ( ! (sigZ & 1) && (expZ < 0x1E) ) { + sigZ >>= 1; + goto pack; + } + sigZ <<= 3; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + signZ = signF16UI( uiA ); + if ( expDiff < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF16UI( signZ, 0x1F, 0 ); + goto uiZ; + } + if ( expDiff <= -13 ) { + uiZ = packToF16UI( signZ, expB, sigB ); + if ( expA | sigA ) goto addEpsilon; + goto uiZ; + } + expZ = expB; + sigX = sigB | 0x0400; + sigY = sigA + (expA ? 0x0400 : sigA); + shiftDist = 19 + expDiff; + } else { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + uiZ = uiA; + if ( expA == 0x1F ) { + if ( sigA ) goto propagateNaN; + goto uiZ; + } + if ( 13 <= expDiff ) { + if ( expB | sigB ) goto addEpsilon; + goto uiZ; + } + expZ = expA; + sigX = sigA | 0x0400; + sigY = sigB + (expB ? 0x0400 : sigB); + shiftDist = 19 - expDiff; + } + sig32Z = + ((uint_fast32_t) sigX<<19) + ((uint_fast32_t) sigY<<shiftDist); + if ( sig32Z < 0x40000000 ) { + --expZ; + sig32Z <<= 1; + } + sigZ = sig32Z>>16; + if ( sig32Z & 0xFFFF ) { + sigZ |= 1; + } else { + if ( !
(sigZ & 0xF) && (expZ < 0x1E) ) { + sigZ >>= 4; + goto pack; + } + } + } + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + addEpsilon: + roundingMode = softfloat_roundingMode; + if ( roundingMode != softfloat_round_near_even ) { + if ( + roundingMode + == (signF16UI( uiZ ) ? softfloat_round_min + : softfloat_round_max) + ) { + ++uiZ; + if ( (uint16_t) (uiZ<<1) == 0xF800 ) { + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + } + } +#ifdef SOFTFLOAT_ROUND_ODD + else if ( roundingMode == softfloat_round_odd ) { + uiZ |= 1; + } +#endif + } + softfloat_exceptionFlags |= softfloat_flag_inexact; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + pack: + uiZ = packToF16UI( signZ, expZ, sigZ ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF32.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF32.c new file mode 100644 index 00000000..ba647814 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF32.c @@ -0,0 +1,126 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
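For reference while reading the F16 paths above: the routine works on raw binary16 bit patterns with 1 sign bit, 5 exponent bits (so 0x1F is the infinity/NaN exponent) and 10 fraction bits, with the implicit leading significand bit at 0x0400. A hand-rolled pack in the spirit of packToF16UI, assuming that layout and offered only as an illustration:

#include <assert.h>
#include <stdint.h>

static uint16_t pack_f16(unsigned sign, unsigned exp, unsigned frac)
{
    return (uint16_t)((sign << 15) | ((exp & 0x1F) << 10) | (frac & 0x3FF));
}

int main(void)
{
    assert(pack_f16(0, 0x0F, 0) == 0x3C00);   /* +1.0: biased exponent 15 */
    assert(pack_f16(1, 0x1F, 0) == 0xFC00);   /* -infinity */
    assert(pack_f16(0, 0x00, 1) == 0x0001);   /* smallest subnormal */
    return 0;
}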
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" + +float32_t softfloat_addMagsF32( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + int_fast16_t expA; + uint_fast32_t sigA; + int_fast16_t expB; + uint_fast32_t sigB; + int_fast16_t expDiff; + uint_fast32_t uiZ; + bool signZ; + int_fast16_t expZ; + uint_fast32_t sigZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0xFF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + signZ = signF32UI( uiA ); + expZ = expA; + sigZ = 0x01000000 + sigA + sigB; + if ( ! (sigZ & 1) && (expZ < 0xFE) ) { + uiZ = packToF32UI( signZ, expZ, sigZ>>1 ); + goto uiZ; + } + sigZ <<= 6; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + signZ = signF32UI( uiA ); + sigA <<= 6; + sigB <<= 6; + if ( expDiff < 0 ) { + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF32UI( signZ, 0xFF, 0 ); + goto uiZ; + } + expZ = expB; + sigA += expA ? 0x20000000 : sigA; + sigA = softfloat_shiftRightJam32( sigA, -expDiff ); + } else { + if ( expA == 0xFF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigB += expB ? 0x20000000 : sigB; + sigB = softfloat_shiftRightJam32( sigB, expDiff ); + } + sigZ = 0x20000000 + sigA + sigB; + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF32( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF32UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_addMagsF64.c b/vendor/riscv-isa-sim/softfloat/s_addMagsF64.c new file mode 100644 index 00000000..63e1afe9 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_addMagsF64.c @@ -0,0 +1,128 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" + +float64_t + softfloat_addMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! 
expA ) { + uiZ = uiA + sigB; + goto uiZ; + } + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + sigZ = UINT64_C( 0x0020000000000000 ) + sigA + sigB; + sigZ <<= 9; + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigA <<= 9; + sigB <<= 9; + if ( expDiff < 0 ) { + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN; + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + goto uiZ; + } + expZ = expB; + if ( expA ) { + sigA += UINT64_C( 0x2000000000000000 ); + } else { + sigA <<= 1; + } + sigA = softfloat_shiftRightJam64( sigA, -expDiff ); + } else { + if ( expA == 0x7FF ) { + if ( sigA ) goto propagateNaN; + uiZ = uiA; + goto uiZ; + } + expZ = expA; + if ( expB ) { + sigB += UINT64_C( 0x2000000000000000 ); + } else { + sigB <<= 1; + } + sigB = softfloat_shiftRightJam64( sigB, expDiff ); + } + sigZ = UINT64_C( 0x2000000000000000 ) + sigA + sigB; + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c b/vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c new file mode 100644 index 00000000..a06192ed --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecip32_1.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
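The significand alignment above relies on softfloat_shiftRightJam64, whose documented behaviour is a right shift that ORs any bits shifted out into bit 0, so the later rounding step can still tell the value was inexact. A minimal sketch of that behaviour (the helper name is invented here; this is not the library's implementation):

#include <assert.h>
#include <stdint.h>

static uint64_t shift_right_jam64_sketch(uint64_t a, unsigned dist)
{
    if (dist >= 64) return (a != 0);                  /* everything becomes the sticky bit */
    if (dist == 0) return a;
    uint64_t lost = a & ((UINT64_C(1) << dist) - 1);  /* bits shifted out */
    return (a >> dist) | (lost != 0);                 /* jam them into bit 0 */
}

int main(void)
{
    assert(shift_right_jam64_sketch(0x20, 4) == 0x2); /* exact shift, no sticky bit */
    assert(shift_right_jam64_sketch(0x28, 4) == 0x3); /* lost bits set the sticky bit */
    assert(shift_right_jam64_sketch(1, 70) == 1);     /* fully shifted out, still sticky */
    return 0;
}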
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+
+#ifndef softfloat_approxRecip32_1
+
+extern const uint16_t softfloat_approxRecip_1k0s[16];
+extern const uint16_t softfloat_approxRecip_1k1s[16];
+
+uint32_t softfloat_approxRecip32_1( uint32_t a )
+{
+ int index;
+ uint16_t eps, r0;
+ uint32_t sigma0;
+ uint_fast32_t r;
+ uint32_t sqrSigma0;
+
+ index = a>>27 & 0xF;
+ eps = (uint16_t) (a>>11);
+ r0 = softfloat_approxRecip_1k0s[index]
+ - ((softfloat_approxRecip_1k1s[index] * (uint_fast32_t) eps)>>20);
+ sigma0 = ~(uint_fast32_t) ((r0 * (uint_fast64_t) a)>>7);
+ r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>24);
+ sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32;
+ r += ((uint32_t) r * (uint_fast64_t) sqrSigma0)>>48;
+ return r;
+
+}
+
+#endif
+
 diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c new file mode 100644 index 00000000..2ab71a25 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt32_1.c @@ -0,0 +1,73 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_approxRecipSqrt32_1 + +extern const uint16_t softfloat_approxRecipSqrt_1k0s[]; +extern const uint16_t softfloat_approxRecipSqrt_1k1s[]; + +uint32_t softfloat_approxRecipSqrt32_1( unsigned int oddExpA, uint32_t a ) +{ + int index; + uint16_t eps, r0; + uint_fast32_t ESqrR0; + uint32_t sigma0; + uint_fast32_t r; + uint32_t sqrSigma0; + + index = (a>>27 & 0xE) + oddExpA; + eps = (uint16_t) (a>>12); + r0 = softfloat_approxRecipSqrt_1k0s[index] + - ((softfloat_approxRecipSqrt_1k1s[index] * (uint_fast32_t) eps) + >>20); + ESqrR0 = (uint_fast32_t) r0 * r0; + if ( ! oddExpA ) ESqrR0 <<= 1; + sigma0 = ~(uint_fast32_t) (((uint32_t) ESqrR0 * (uint_fast64_t) a)>>23); + r = ((uint_fast32_t) r0<<16) + ((r0 * (uint_fast64_t) sigma0)>>25); + sqrSigma0 = ((uint_fast64_t) sigma0 * sigma0)>>32; + r += ((uint32_t) ((r>>1) + (r>>3) - ((uint_fast32_t) r0<<14)) + * (uint_fast64_t) sqrSigma0) + >>48; + if ( ! (r & 0x80000000) ) r = 0x80000000; + return r; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c new file mode 100644 index 00000000..a60cf825 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecipSqrt_1Ks.c @@ -0,0 +1,49 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitives.h" + +const uint16_t softfloat_approxRecipSqrt_1k0s[16] = { + 0xB4C9, 0xFFAB, 0xAA7D, 0xF11C, 0xA1C5, 0xE4C7, 0x9A43, 0xDA29, + 0x93B5, 0xD0E5, 0x8DED, 0xC8B7, 0x88C6, 0xC16D, 0x8424, 0xBAE1 +}; +const uint16_t softfloat_approxRecipSqrt_1k1s[16] = { + 0xA5A5, 0xEA42, 0x8C21, 0xC62D, 0x788F, 0xAA7F, 0x6928, 0x94B6, + 0x5CC7, 0x8335, 0x52A6, 0x74E2, 0x4A3E, 0x68FE, 0x432B, 0x5EFD +}; + diff --git a/vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c b/vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c new file mode 100644 index 00000000..1108fcbe --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_approxRecip_1Ks.c @@ -0,0 +1,49 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitives.h" + +const uint16_t softfloat_approxRecip_1k0s[16] = { + 0xFFC4, 0xF0BE, 0xE363, 0xD76F, 0xCCAD, 0xC2F0, 0xBA16, 0xB201, + 0xAA97, 0xA3C6, 0x9D7A, 0x97A6, 0x923C, 0x8D32, 0x887E, 0x8417 +}; +const uint16_t softfloat_approxRecip_1k1s[16] = { + 0xF0F1, 0xD62C, 0xBFA1, 0xAC77, 0x9C0A, 0x8DDB, 0x8185, 0x76BA, + 0x6D3B, 0x64D4, 0x5D5C, 0x56B1, 0x50B6, 0x4B55, 0x4679, 0x4211 +}; + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c new file mode 100644 index 00000000..9b97f343 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF128UI.c @@ -0,0 +1,56 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include "platform.h" +#include "primitiveTypes.h" + +#define softfloat_commonNaNToF128UI softfloat_commonNaNToF128UI +#include "specialize.h" + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN *aPtr ) +{ + struct uint128 uiZ; + + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + return uiZ; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF16UI.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF32UI.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. 
+*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_commonNaNToF64UI.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_compare128M.c b/vendor/riscv-isa-sim/softfloat/s_compare128M.c new file mode 100644 index 00000000..c2819e20 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_compare128M.c @@ -0,0 +1,62 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_compare128M + +int_fast8_t softfloat_compare128M( const uint32_t *aPtr, const uint32_t *bPtr ) +{ + unsigned int index, lastIndex; + uint32_t wordA, wordB; + + index = indexWordHi( 4 ); + lastIndex = indexWordLo( 4 ); + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + if ( wordA != wordB ) return (wordA < wordB) ? 
-1 : 1; + if ( index == lastIndex ) break; + index -= wordIncr; + } + return 0; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_compare96M.c b/vendor/riscv-isa-sim/softfloat/s_compare96M.c new file mode 100644 index 00000000..0dc39f5d --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_compare96M.c @@ -0,0 +1,62 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_compare96M + +int_fast8_t softfloat_compare96M( const uint32_t *aPtr, const uint32_t *bPtr ) +{ + unsigned int index, lastIndex; + uint32_t wordA, wordB; + + index = indexWordHi( 3 ); + lastIndex = indexWordLo( 3 ); + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + if ( wordA != wordB ) return (wordA < wordB) ? -1 : 1; + if ( index == lastIndex ) break; + index -= wordIncr; + } + return 0; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c new file mode 100644 index 00000000..950db6c8 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros16.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_countLeadingZeros16 + +#define softfloat_countLeadingZeros16 softfloat_countLeadingZeros16 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros16( uint16_t a ) +{ + uint_fast8_t count; + + count = 8; + if ( 0x100 <= a ) { + count = 0; + a >>= 8; + } + count += softfloat_countLeadingZeros8[a]; + return count; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c new file mode 100644 index 00000000..fbf8ab6a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros32.c @@ -0,0 +1,64 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_countLeadingZeros32 + +#define softfloat_countLeadingZeros32 softfloat_countLeadingZeros32 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros32( uint32_t a ) +{ + uint_fast8_t count; + + count = 0; + if ( a < 0x10000 ) { + count = 16; + a <<= 16; + } + if ( a < 0x1000000 ) { + count += 8; + a <<= 8; + } + count += softfloat_countLeadingZeros8[a>>24]; + return count; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c new file mode 100644 index 00000000..00457418 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros64.c @@ -0,0 +1,73 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_countLeadingZeros64 + +#define softfloat_countLeadingZeros64 softfloat_countLeadingZeros64 +#include "primitives.h" + +uint_fast8_t softfloat_countLeadingZeros64( uint64_t a ) +{ + uint_fast8_t count; + uint32_t a32; + + count = 0; + a32 = a>>32; + if ( ! 
a32 ) { + count = 32; + a32 = a; + } + /*------------------------------------------------------------------------ + | From here, result is current count + count leading zeros of `a32'. + *------------------------------------------------------------------------*/ + if ( a32 < 0x10000 ) { + count += 16; + a32 <<= 16; + } + if ( a32 < 0x1000000 ) { + count += 8; + a32 <<= 8; + } + count += softfloat_countLeadingZeros8[a32>>24]; + return count; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c new file mode 100644 index 00000000..1158d01c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_countLeadingZeros8.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitives.h" + +const uint_least8_t softfloat_countLeadingZeros8[256] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + diff --git a/vendor/riscv-isa-sim/softfloat/s_eq128.c b/vendor/riscv-isa-sim/softfloat/s_eq128.c new file mode 100644 index 00000000..625ef002 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_eq128.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" + +#ifndef softfloat_eq128 + +bool softfloat_eq128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + + return (a64 == b64) && (a0 == b0); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f128UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f16UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f32UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c b/vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c new file mode 100644 index 00000000..861b2696 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_f64UIToCommonNaN.c @@ -0,0 +1,5 @@ + +/*---------------------------------------------------------------------------- +| This file intentionally contains no code. +*----------------------------------------------------------------------------*/ + diff --git a/vendor/riscv-isa-sim/softfloat/s_le128.c b/vendor/riscv-isa-sim/softfloat/s_le128.c new file mode 100644 index 00000000..7261012f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_le128.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+
+#ifndef softfloat_le128
+
+bool softfloat_le128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+ return (a64 < b64) || ((a64 == b64) && (a0 <= b0));
+
+}
+
+#endif
+
 diff --git a/vendor/riscv-isa-sim/softfloat/s_lt128.c b/vendor/riscv-isa-sim/softfloat/s_lt128.c new file mode 100644 index 00000000..0d461c36 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_lt128.c @@ -0,0 +1,51 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "platform.h"
+
+#ifndef softfloat_lt128
+
+bool softfloat_lt128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
+{
+
+ return (a64 < b64) || ((a64 == b64) && (a0 < b0));
+
+}
+
+#endif
+
 diff --git a/vendor/riscv-isa-sim/softfloat/s_mul128By32.c b/vendor/riscv-isa-sim/softfloat/s_mul128By32.c new file mode 100644 index 00000000..6e71dd0c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul128By32.c @@ -0,0 +1,58 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of
+California. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_mul128By32
+
+struct uint128 softfloat_mul128By32( uint64_t a64, uint64_t a0, uint32_t b )
+{
+ struct uint128 z;
+ uint_fast64_t mid;
+ uint_fast32_t carry;
+
+ z.v0 = a0 * b;
+ mid = (uint_fast64_t) (uint32_t) (a0>>32) * b;
+ carry = (uint32_t) ((uint_fast32_t) (z.v0>>32) - (uint_fast32_t) mid);
+ z.v64 = a64 * b + (uint_fast32_t) ((mid + carry)>>32);
+ return z;
+
+}
+
+#endif
+
 diff --git a/vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c b/vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c new file mode 100644 index 00000000..49a1d294 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul128MTo256M.c @@ -0,0 +1,100 @@
+
+/*============================================================================
+
+This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic
+Package, Release 3d, by John R. Hauser.
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul128MTo256M + +void + softfloat_mul128MTo256M( + const uint32_t *aPtr, const uint32_t *bPtr, uint32_t *zPtr ) +{ + uint32_t *lastZPtr, wordB; + uint64_t dwordProd; + uint32_t wordZ; + uint_fast8_t carry; + + bPtr += indexWordLo( 4 ); + lastZPtr = zPtr + indexMultiwordHi( 8, 5 ); + zPtr += indexMultiwordLo( 8, 5 ); + wordB = *bPtr; + dwordProd = (uint64_t) aPtr[indexWord( 4, 0 )] * wordB; + zPtr[indexWord( 5, 0 )] = dwordProd; + dwordProd = (uint64_t) aPtr[indexWord( 4, 1 )] * wordB + (dwordProd>>32); + zPtr[indexWord( 5, 1 )] = dwordProd; + dwordProd = (uint64_t) aPtr[indexWord( 4, 2 )] * wordB + (dwordProd>>32); + zPtr[indexWord( 5, 2 )] = dwordProd; + dwordProd = (uint64_t) aPtr[indexWord( 4, 3 )] * wordB + (dwordProd>>32); + zPtr[indexWord( 5, 3 )] = dwordProd; + zPtr[indexWord( 5, 4 )] = dwordProd>>32; + do { + bPtr += wordIncr; + zPtr += wordIncr; + wordB = *bPtr; + dwordProd = (uint64_t) aPtr[indexWord( 4, 0 )] * wordB; + wordZ = zPtr[indexWord( 5, 0 )] + (uint32_t) dwordProd; + zPtr[indexWord( 5, 0 )] = wordZ; + carry = (wordZ < (uint32_t) dwordProd); + dwordProd = + (uint64_t) aPtr[indexWord( 4, 1 )] * wordB + (dwordProd>>32); + wordZ = zPtr[indexWord( 5, 1 )] + (uint32_t) dwordProd + carry; + zPtr[indexWord( 5, 1 )] = wordZ; + if ( wordZ != (uint32_t) dwordProd ) { + carry = (wordZ < (uint32_t) dwordProd); + } + dwordProd = + (uint64_t) aPtr[indexWord( 4, 2 )] * wordB + (dwordProd>>32); + wordZ = zPtr[indexWord( 5, 2 )] + (uint32_t) dwordProd + carry; + zPtr[indexWord( 5, 2 )] = wordZ; + if ( wordZ != (uint32_t) dwordProd ) { + carry = (wordZ < (uint32_t) dwordProd); + } + dwordProd = + (uint64_t) aPtr[indexWord( 4, 3 )] * wordB + (dwordProd>>32); + wordZ = zPtr[indexWord( 5, 3 )] + (uint32_t) dwordProd + carry; + zPtr[indexWord( 5, 3 )] = wordZ; + if ( wordZ != (uint32_t) dwordProd ) { + carry = (wordZ < (uint32_t) dwordProd); + } + zPtr[indexWord( 5, 4 )] = (dwordProd>>32) + carry; + } while ( zPtr 
!= lastZPtr ); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul128To256M.c b/vendor/riscv-isa-sim/softfloat/s_mul128To256M.c new file mode 100644 index 00000000..fccc2a69 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul128To256M.c @@ -0,0 +1,71 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_mul128To256M + +#define softfloat_mul128To256M softfloat_mul128To256M +#include "primitives.h" + +void + softfloat_mul128To256M( + uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0, uint64_t *zPtr ) +{ + struct uint128 p0, p64, p128; + uint_fast64_t z64, z128, z192; + + p0 = softfloat_mul64To128( a0, b0 ); + zPtr[indexWord( 4, 0 )] = p0.v0; + p64 = softfloat_mul64To128( a64, b0 ); + z64 = p64.v0 + p0.v64; + z128 = p64.v64 + (z64 < p64.v0); + p128 = softfloat_mul64To128( a64, b64 ); + z128 += p128.v0; + z192 = p128.v64 + (z128 < p128.v0); + p64 = softfloat_mul64To128( a0, b64 ); + z64 += p64.v0; + zPtr[indexWord( 4, 1 )] = z64; + p64.v64 += (z64 < p64.v0); + z128 += p64.v64; + zPtr[indexWord( 4, 2 )] = z128; + zPtr[indexWord( 4, 3 )] = z192 + (z128 < p64.v64); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c b/vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c new file mode 100644 index 00000000..f7e7104e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul64ByShifted32To128.c @@ -0,0 +1,56 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul64ByShifted32To128 + +struct uint128 softfloat_mul64ByShifted32To128( uint64_t a, uint32_t b ) +{ + uint_fast64_t mid; + struct uint128 z; + + mid = (uint_fast64_t) (uint32_t) a * b; + z.v0 = mid<<32; + z.v64 = (uint_fast64_t) (uint32_t) (a>>32) * b + (mid>>32); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul64To128.c b/vendor/riscv-isa-sim/softfloat/s_mul64To128.c new file mode 100644 index 00000000..6620a20b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul64To128.c @@ -0,0 +1,66 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul64To128 + +struct uint128 softfloat_mul64To128( uint64_t a, uint64_t b ) +{ + uint32_t a32, a0, b32, b0; + struct uint128 z; + uint64_t mid1, mid; + + a32 = a>>32; + a0 = a; + b32 = b>>32; + b0 = b; + z.v0 = (uint_fast64_t) a0 * b0; + mid1 = (uint_fast64_t) a32 * b0; + mid = mid1 + (uint_fast64_t) a0 * b32; + z.v64 = (uint_fast64_t) a32 * b32; + z.v64 += (uint_fast64_t) (mid < mid1)<<32 | mid>>32; + mid <<= 32; + z.v0 += mid; + z.v64 += (z.v0 < mid); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mul64To128M.c b/vendor/riscv-isa-sim/softfloat/s_mul64To128M.c new file mode 100644 index 00000000..e3f9a481 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mul64To128M.c @@ -0,0 +1,68 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_mul64To128M + +void softfloat_mul64To128M( uint64_t a, uint64_t b, uint32_t *zPtr ) +{ + uint32_t a32, a0, b32, b0; + uint64_t z0, mid1, z64, mid; + + a32 = a>>32; + a0 = a; + b32 = b>>32; + b0 = b; + z0 = (uint64_t) a0 * b0; + mid1 = (uint64_t) a32 * b0; + mid = mid1 + (uint64_t) a0 * b32; + z64 = (uint64_t) a32 * b32; + z64 += (uint64_t) (mid < mid1)<<32 | mid>>32; + mid <<= 32; + z0 += mid; + zPtr[indexWord( 4, 1 )] = z0>>32; + zPtr[indexWord( 4, 0 )] = z0; + z64 += (z0 < mid); + zPtr[indexWord( 4, 3 )] = z64>>32; + zPtr[indexWord( 4, 2 )] = z64; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_mulAddF128.c b/vendor/riscv-isa-sim/softfloat/s_mulAddF128.c new file mode 100644 index 00000000..877b33d2 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mulAddF128.c @@ -0,0 +1,350 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t + softfloat_mulAddF128( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0, + uint_fast64_t uiC64, + uint_fast64_t uiC0, + uint_fast8_t op + ) +{ + bool signA; + int_fast32_t expA; + struct uint128 sigA; + bool signB; + int_fast32_t expB; + struct uint128 sigB; + bool signC; + int_fast32_t expC; + struct uint128 sigC; + bool signZ; + uint_fast64_t magBits; + struct uint128 uiZ; + struct exp32_sig128 normExpSig; + int_fast32_t expZ; + uint64_t sig256Z[4]; + struct uint128 sigZ; + int_fast32_t shiftDist, expDiff; + struct uint128 x128; + uint64_t sig256C[4]; + static uint64_t zero256[4] = INIT_UINTM4( 0, 0, 0, 0 ); + uint_fast64_t sigZExtra, sig256Z0; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF128UI64( uiA64 ); + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + signB = signF128UI64( uiB64 ); + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + signC = signF128UI64( uiC64 ) ^ (op == softfloat_mulAdd_subC); + expC = expF128UI64( uiC64 ); + sigC.v64 = fracF128UI64( uiC64 ); + sigC.v0 = uiC0; + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FFF ) { + if ( + (sigA.v64 | sigA.v0) || ((expB == 0x7FFF) && (sigB.v64 | sigB.v0)) + ) { + goto propagateNaN_ABC; + } + magBits = expB | sigB.v64 | sigB.v0; + goto infProdArg; + } + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN_ABC; + magBits = expA | sigA.v64 | sigA.v0; + goto infProdArg; + } + if ( expC == 0x7FFF ) { + if ( sigC.v64 | sigC.v0 ) { + uiZ.v64 = 0; + uiZ.v0 = 0; + goto propagateNaN_ZC; + } + uiZ.v64 = uiC64; + uiZ.v0 = uiC0; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! (sigA.v64 | sigA.v0) ) goto zeroProd; + normExpSig = softfloat_normSubnormalF128Sig( sigA.v64, sigA.v0 ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! (sigB.v64 | sigB.v0) ) goto zeroProd; + normExpSig = softfloat_normSubnormalF128Sig( sigB.v64, sigB.v0 ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FFE; + sigA.v64 |= UINT64_C( 0x0001000000000000 ); + sigB.v64 |= UINT64_C( 0x0001000000000000 ); + sigA = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 8 ); + sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 15 ); + softfloat_mul128To256M( sigA.v64, sigA.v0, sigB.v64, sigB.v0, sig256Z ); + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + shiftDist = 0; + if ( ! (sigZ.v64 & UINT64_C( 0x0100000000000000 )) ) { + --expZ; + shiftDist = -1; + } + if ( ! expC ) { + if ( ! 
(sigC.v64 | sigC.v0) ) { + shiftDist += 8; + goto sigZ; + } + normExpSig = softfloat_normSubnormalF128Sig( sigC.v64, sigC.v0 ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC.v64 |= UINT64_C( 0x0001000000000000 ); + sigC = softfloat_shortShiftLeft128( sigC.v64, sigC.v0, 8 ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + shiftDist -= expDiff; + if ( shiftDist ) { + sigZ = + softfloat_shiftRightJam128( sigZ.v64, sigZ.v0, shiftDist ); + } + } else { + if ( ! shiftDist ) { + x128 = + softfloat_shortShiftRight128( + sig256Z[indexWord( 4, 1 )], sig256Z[indexWord( 4, 0 )], + 1 + ); + sig256Z[indexWord( 4, 1 )] = (sigZ.v0<<63) | x128.v64; + sig256Z[indexWord( 4, 0 )] = x128.v0; + sigZ = softfloat_shortShiftRight128( sigZ.v64, sigZ.v0, 1 ); + sig256Z[indexWord( 4, 3 )] = sigZ.v64; + sig256Z[indexWord( 4, 2 )] = sigZ.v0; + } + } + } else { + if ( shiftDist ) softfloat_add256M( sig256Z, sig256Z, sig256Z ); + if ( ! expDiff ) { + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + } else { + sig256C[indexWord( 4, 3 )] = sigC.v64; + sig256C[indexWord( 4, 2 )] = sigC.v0; + sig256C[indexWord( 4, 1 )] = 0; + sig256C[indexWord( 4, 0 )] = 0; + softfloat_shiftRightJam256M( sig256C, expDiff, sig256C ); + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + shiftDist = 8; + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ = softfloat_add128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 ); + } else { + softfloat_add256M( sig256Z, sig256C, sig256Z ); + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + } + if ( sigZ.v64 & UINT64_C( 0x0200000000000000 ) ) { + ++expZ; + shiftDist = 9; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + if ( expDiff < -1 ) { + sigZ = + softfloat_sub128( sigC.v64, sigC.v0, sigZ.v64, sigZ.v0 ); + sigZExtra = + sig256Z[indexWord( 4, 1 )] | sig256Z[indexWord( 4, 0 )]; + if ( sigZExtra ) { + sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, 0, 1 ); + } + if ( ! (sigZ.v64 & UINT64_C( 0x0100000000000000 )) ) { + --expZ; + shiftDist = 7; + } + goto shiftRightRoundPack; + } else { + sig256C[indexWord( 4, 3 )] = sigC.v64; + sig256C[indexWord( 4, 2 )] = sigC.v0; + sig256C[indexWord( 4, 1 )] = 0; + sig256C[indexWord( 4, 0 )] = 0; + softfloat_sub256M( sig256C, sig256Z, sig256Z ); + } + } else if ( ! expDiff ) { + sigZ = softfloat_sub128( sigZ.v64, sigZ.v0, sigC.v64, sigC.v0 ); + if ( + ! (sigZ.v64 | sigZ.v0) && ! sig256Z[indexWord( 4, 1 )] + && ! sig256Z[indexWord( 4, 0 )] + ) { + goto completeCancellation; + } + sig256Z[indexWord( 4, 3 )] = sigZ.v64; + sig256Z[indexWord( 4, 2 )] = sigZ.v0; + if ( sigZ.v64 & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + softfloat_sub256M( zero256, sig256Z, sig256Z ); + } + } else { + softfloat_sub256M( sig256Z, sig256C, sig256Z ); + if ( 1 < expDiff ) { + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + if ( ! 
(sigZ.v64 & UINT64_C( 0x0100000000000000 )) ) { + --expZ; + shiftDist = 7; + } + goto sigZ; + } + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sigZ.v64 = sig256Z[indexWord( 4, 3 )]; + sigZ.v0 = sig256Z[indexWord( 4, 2 )]; + sigZExtra = sig256Z[indexWord( 4, 1 )]; + sig256Z0 = sig256Z[indexWord( 4, 0 )]; + if ( sigZ.v64 ) { + if ( sig256Z0 ) sigZExtra |= 1; + } else { + expZ -= 64; + sigZ.v64 = sigZ.v0; + sigZ.v0 = sigZExtra; + sigZExtra = sig256Z0; + if ( ! sigZ.v64 ) { + expZ -= 64; + sigZ.v64 = sigZ.v0; + sigZ.v0 = sigZExtra; + sigZExtra = 0; + if ( ! sigZ.v64 ) { + expZ -= 64; + sigZ.v64 = sigZ.v0; + sigZ.v0 = 0; + } + } + } + shiftDist = softfloat_countLeadingZeros64( sigZ.v64 ); + expZ += 7 - shiftDist; + shiftDist = 15 - shiftDist; + if ( 0 < shiftDist ) goto shiftRightRoundPack; + if ( shiftDist ) { + shiftDist = -shiftDist; + sigZ = softfloat_shortShiftLeft128( sigZ.v64, sigZ.v0, shiftDist ); + x128 = softfloat_shortShiftLeft128( 0, sigZExtra, shiftDist ); + sigZ.v0 |= x128.v64; + sigZExtra = x128.v0; + } + goto roundPack; + } + sigZ: + sigZExtra = sig256Z[indexWord( 4, 1 )] | sig256Z[indexWord( 4, 0 )]; + shiftRightRoundPack: + sigZExtra = (uint64_t) (sigZ.v0<<(64 - shiftDist)) | (sigZExtra != 0); + sigZ = softfloat_shortShiftRight128( sigZ.v64, sigZ.v0, shiftDist ); + roundPack: + return + softfloat_roundPackToF128( + signZ, expZ - 1, sigZ.v64, sigZ.v0, sigZExtra ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ.v64 = packToF128UI64( signZ, 0x7FFF, 0 ); + uiZ.v0 = 0; + if ( expC != 0x7FFF ) goto uiZ; + if ( sigC.v64 | sigC.v0 ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF128UI( uiZ.v64, uiZ.v0, uiC64, uiC0 ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ.v64 = uiC64; + uiZ.v0 = uiC0; + if ( ! (expC | sigC.v64 | sigC.v0) && (signZ != signC) ) { + completeCancellation: + uiZ.v64 = + packToF128UI64( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + uiZ.v0 = 0; + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_mulAddF16.c b/vendor/riscv-isa-sim/softfloat/s_mulAddF16.c new file mode 100644 index 00000000..b6040072 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_mulAddF16.c @@ -0,0 +1,226 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t + softfloat_mulAddF16( + uint_fast16_t uiA, uint_fast16_t uiB, uint_fast16_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast8_t expA; + uint_fast16_t sigA; + bool signB; + int_fast8_t expB; + uint_fast16_t sigB; + bool signC; + int_fast8_t expC; + uint_fast16_t sigC; + bool signProd; + uint_fast16_t magBits, uiZ; + struct exp8_sig16 normExpSig; + int_fast8_t expProd; + uint_fast32_t sigProd; + bool signZ; + int_fast8_t expZ; + uint_fast16_t sigZ; + int_fast8_t expDiff; + uint_fast32_t sig32Z, sig32C; + int_fast8_t shiftDist; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF16UI( uiA ); + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + signB = signF16UI( uiB ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + signC = signF16UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF16UI( uiC ); + sigC = fracF16UI( uiC ); + signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA || ((expB == 0x1F) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x1F ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x1F ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF16Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! 
sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF16Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expProd = expA + expB - 0xE; + sigA = (sigA | 0x0400)<<4; + sigB = (sigB | 0x0400)<<4; + sigProd = (uint_fast32_t) sigA * sigB; + if ( sigProd < 0x20000000 ) { + --expProd; + sigProd <<= 1; + } + signZ = signProd; + if ( ! expC ) { + if ( ! sigC ) { + expZ = expProd - 1; + sigZ = sigProd>>15 | ((sigProd & 0x7FFF) != 0); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF16Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | 0x0400)<<3; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expProd - expC; + if ( signProd == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + expZ = expC; + sigZ = sigC + softfloat_shiftRightJam32( sigProd, 16 - expDiff ); + } else { + expZ = expProd; + sig32Z = + sigProd + + softfloat_shiftRightJam32( + (uint_fast32_t) sigC<<16, expDiff ); + sigZ = sig32Z>>16 | ((sig32Z & 0xFFFF) != 0 ); + } + if ( sigZ < 0x4000 ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig32C = (uint_fast32_t) sigC<<16; + if ( expDiff < 0 ) { + signZ = signC; + expZ = expC; + sig32Z = sig32C - softfloat_shiftRightJam32( sigProd, -expDiff ); + } else if ( ! expDiff ) { + expZ = expProd; + sig32Z = sigProd - sig32C; + if ( ! sig32Z ) goto completeCancellation; + if ( sig32Z & 0x80000000 ) { + signZ = ! 
signZ; + sig32Z = -sig32Z; + } + } else { + expZ = expProd; + sig32Z = sigProd - softfloat_shiftRightJam32( sig32C, expDiff ); + } + shiftDist = softfloat_countLeadingZeros32( sig32Z ) - 1; + expZ -= shiftDist; + shiftDist -= 16; + if ( shiftDist < 0 ) { + sigZ = + sig32Z>>(-shiftDist) + | ((uint32_t) (sig32Z<<(shiftDist & 31)) != 0); + } else { + sigZ = (uint_fast16_t) sig32Z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t + softfloat_mulAddF32( + uint_fast32_t uiA, uint_fast32_t uiB, uint_fast32_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint_fast32_t sigA; + bool signB; + int_fast16_t expB; + uint_fast32_t sigB; + bool signC; + int_fast16_t expC; + uint_fast32_t sigC; + bool signProd; + uint_fast32_t magBits, uiZ; + struct exp16_sig32 normExpSig; + int_fast16_t expProd; + uint_fast64_t sigProd; + bool signZ; + int_fast16_t expZ; + uint_fast32_t sigZ; + int_fast16_t expDiff; + uint_fast64_t sig64Z, sig64C; + int_fast8_t shiftDist; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF32UI( uiA ); + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + signB = signF32UI( uiB ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + signC = signF32UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF32UI( uiC ); + sigC = fracF32UI( uiC ); + signProd = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA || ((expB == 0xFF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0xFF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0xFF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF32Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF32Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expProd = expA + expB - 0x7E; + sigA = (sigA | 0x00800000)<<7; + sigB = (sigB | 0x00800000)<<7; + sigProd = (uint_fast64_t) sigA * sigB; + if ( sigProd < UINT64_C( 0x2000000000000000 ) ) { + --expProd; + sigProd <<= 1; + } + signZ = signProd; + if ( ! expC ) { + if ( ! 
sigC ) { + expZ = expProd - 1; + sigZ = softfloat_shortShiftRightJam64( sigProd, 31 ); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF32Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | 0x00800000)<<6; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expProd - expC; + if ( signProd == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + expZ = expC; + sigZ = sigC + softfloat_shiftRightJam64( sigProd, 32 - expDiff ); + } else { + expZ = expProd; + sig64Z = + sigProd + + softfloat_shiftRightJam64( + (uint_fast64_t) sigC<<32, expDiff ); + sigZ = softfloat_shortShiftRightJam64( sig64Z, 32 ); + } + if ( sigZ < 0x40000000 ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + sig64C = (uint_fast64_t) sigC<<32; + if ( expDiff < 0 ) { + signZ = signC; + expZ = expC; + sig64Z = sig64C - softfloat_shiftRightJam64( sigProd, -expDiff ); + } else if ( ! expDiff ) { + expZ = expProd; + sig64Z = sigProd - sig64C; + if ( ! sig64Z ) goto completeCancellation; + if ( sig64Z & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig64Z = -sig64Z; + } + } else { + expZ = expProd; + sig64Z = sigProd - softfloat_shiftRightJam64( sig64C, expDiff ); + } + shiftDist = softfloat_countLeadingZeros64( sig64Z ) - 1; + expZ -= shiftDist; + shiftDist -= 32; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig64Z, -shiftDist ); + } else { + sigZ = (uint_fast32_t) sig64Z< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#ifdef SOFTFLOAT_FAST_INT64 + +float64_t + softfloat_mulAddF64( + uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint_fast64_t sigA; + bool signB; + int_fast16_t expB; + uint_fast64_t sigB; + bool signC; + int_fast16_t expC; + uint_fast64_t sigC; + bool signZ; + uint_fast64_t magBits, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + struct uint128 sig128Z; + uint_fast64_t sigZ; + int_fast16_t expDiff; + struct uint128 sig128C; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF64UI( uiC ); + sigC = fracF64UI( uiC ); + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x7FF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + 
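+    /*------------------------------------------------------------------------
+    | Editor's note (not part of the upstream SoftFloat sources): from here
+    | the code normalizes any subnormal inputs, forms the exact double-width
+    | product of the two 64-bit significands with softfloat_mul64To128,
+    | aligns the addend by the exponent difference, adds or subtracts it,
+    | and rounds exactly once in softfloat_roundPackToF64.  That single
+    | final rounding of (+/-a*b) +/- c is what distinguishes the fused
+    | multiply-add from a separate multiply followed by an add.
+    *------------------------------------------------------------------------*/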
/*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FE; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<10; + sig128Z = softfloat_mul64To128( sigA, sigB ); + if ( sig128Z.v64 < UINT64_C( 0x2000000000000000 ) ) { + --expZ; + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128Z.v64, sig128Z.v0 ); + } + if ( ! expC ) { + if ( ! sigC ) { + --expZ; + sigZ = sig128Z.v64<<1 | (sig128Z.v0 != 0); + goto roundPack; + } + normExpSig = softfloat_normSubnormalF64Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<9; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + sig128Z.v64 = softfloat_shiftRightJam64( sig128Z.v64, -expDiff ); + } else { + sig128Z = + softfloat_shortShiftRightJam128( sig128Z.v64, sig128Z.v0, 1 ); + } + } else if ( expDiff ) { + sig128C = softfloat_shiftRightJam128( sigC, 0, expDiff ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ = (sigC + sig128Z.v64) | (sig128Z.v0 != 0); + } else { + sig128Z = + softfloat_add128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + sigZ = sig128Z.v64 | (sig128Z.v0 != 0); + } + if ( sigZ < UINT64_C( 0x4000000000000000 ) ) { + --expZ; + sigZ <<= 1; + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + sig128Z = softfloat_sub128( sigC, 0, sig128Z.v64, sig128Z.v0 ); + } else if ( ! expDiff ) { + sig128Z.v64 = sig128Z.v64 - sigC; + if ( ! (sig128Z.v64 | sig128Z.v0) ) goto completeCancellation; + if ( sig128Z.v64 & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! signZ; + sig128Z = softfloat_sub128( 0, 0, sig128Z.v64, sig128Z.v0 ); + } + } else { + sig128Z = + softfloat_sub128( + sig128Z.v64, sig128Z.v0, sig128C.v64, sig128C.v0 ); + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( ! 
sig128Z.v64 ) { + expZ -= 64; + sig128Z.v64 = sig128Z.v0; + sig128Z.v0 = 0; + } + shiftDist = softfloat_countLeadingZeros64( sig128Z.v64 ) - 1; + expZ -= shiftDist; + if ( shiftDist < 0 ) { + sigZ = softfloat_shortShiftRightJam64( sig128Z.v64, -shiftDist ); + } else { + sig128Z = + softfloat_shortShiftLeft128( + sig128Z.v64, sig128Z.v0, shiftDist ); + sigZ = sig128Z.v64; + } + sigZ |= (sig128Z.v0 != 0); + } + roundPack: + return softfloat_roundPackToF64( signZ, expZ, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + if ( expC != 0x7FF ) goto uiZ; + if ( sigC ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF64UI( uiZ, uiC ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ = uiC; + if ( ! (expC | sigC) && (signZ != signC) ) { + completeCancellation: + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + +#else + +float64_t + softfloat_mulAddF64( + uint_fast64_t uiA, uint_fast64_t uiB, uint_fast64_t uiC, uint_fast8_t op ) +{ + bool signA; + int_fast16_t expA; + uint64_t sigA; + bool signB; + int_fast16_t expB; + uint64_t sigB; + bool signC; + int_fast16_t expC; + uint64_t sigC; + bool signZ; + uint64_t magBits, uiZ; + struct exp16_sig64 normExpSig; + int_fast16_t expZ; + uint32_t sig128Z[4]; + uint64_t sigZ; + int_fast16_t shiftDist, expDiff; + uint32_t sig128C[4]; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + signA = signF64UI( uiA ); + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + signB = signF64UI( uiB ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + signC = signF64UI( uiC ) ^ (op == softfloat_mulAdd_subC); + expC = expF64UI( uiC ); + sigC = fracF64UI( uiC ); + signZ = signA ^ signB ^ (op == softfloat_mulAdd_subProd); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA || ((expB == 0x7FF) && sigB) ) goto propagateNaN_ABC; + magBits = expB | sigB; + goto infProdArg; + } + if ( expB == 0x7FF ) { + if ( sigB ) goto propagateNaN_ABC; + magBits = expA | sigA; + goto infProdArg; + } + if ( expC == 0x7FF ) { + if ( sigC ) { + uiZ = 0; + goto propagateNaN_ZC; + } + uiZ = uiC; + goto uiZ; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( ! expA ) { + if ( ! sigA ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigA ); + expA = normExpSig.exp; + sigA = normExpSig.sig; + } + if ( ! expB ) { + if ( ! 
sigB ) goto zeroProd; + normExpSig = softfloat_normSubnormalF64Sig( sigB ); + expB = normExpSig.exp; + sigB = normExpSig.sig; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expZ = expA + expB - 0x3FE; + sigA = (sigA | UINT64_C( 0x0010000000000000 ))<<10; + sigB = (sigB | UINT64_C( 0x0010000000000000 ))<<11; + softfloat_mul64To128M( sigA, sigB, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 | sig128Z[indexWord( 4, 2 )]; + shiftDist = 0; + if ( ! (sigZ & UINT64_C( 0x4000000000000000 )) ) { + --expZ; + shiftDist = -1; + } + if ( ! expC ) { + if ( ! sigC ) { + if ( shiftDist ) sigZ <<= 1; + goto sigZ; + } + normExpSig = softfloat_normSubnormalF64Sig( sigC ); + expC = normExpSig.exp; + sigC = normExpSig.sig; + } + sigC = (sigC | UINT64_C( 0x0010000000000000 ))<<10; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expZ - expC; + if ( expDiff < 0 ) { + expZ = expC; + if ( (signZ == signC) || (expDiff < -1) ) { + shiftDist -= expDiff; + if ( shiftDist) { + sigZ = softfloat_shiftRightJam64( sigZ, shiftDist ); + } + } else { + if ( ! shiftDist ) { + softfloat_shortShiftRight128M( sig128Z, 1, sig128Z ); + } + } + } else { + if ( shiftDist ) softfloat_add128M( sig128Z, sig128Z, sig128Z ); + if ( ! expDiff ) { + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + } else { + sig128C[indexWord( 4, 3 )] = sigC>>32; + sig128C[indexWord( 4, 2 )] = sigC; + sig128C[indexWord( 4, 1 )] = 0; + sig128C[indexWord( 4, 0 )] = 0; + softfloat_shiftRightJam128M( sig128C, expDiff, sig128C ); + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( signZ == signC ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff <= 0 ) { + sigZ += sigC; + } else { + softfloat_add128M( sig128Z, sig128C, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + } + if ( sigZ & UINT64_C( 0x8000000000000000 ) ) { + ++expZ; + sigZ = softfloat_shortShiftRightJam64( sigZ, 1 ); + } + } else { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expDiff < 0 ) { + signZ = signC; + if ( expDiff < -1 ) { + sigZ = sigC - sigZ; + if ( + sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] + ) { + sigZ = (sigZ - 1) | 1; + } + if ( ! (sigZ & UINT64_C( 0x4000000000000000 )) ) { + --expZ; + sigZ <<= 1; + } + goto roundPack; + } else { + sig128C[indexWord( 4, 3 )] = sigC>>32; + sig128C[indexWord( 4, 2 )] = sigC; + sig128C[indexWord( 4, 1 )] = 0; + sig128C[indexWord( 4, 0 )] = 0; + softfloat_sub128M( sig128C, sig128Z, sig128Z ); + } + } else if ( ! expDiff ) { + sigZ -= sigC; + if ( + ! sigZ && ! sig128Z[indexWord( 4, 1 )] + && ! sig128Z[indexWord( 4, 0 )] + ) { + goto completeCancellation; + } + sig128Z[indexWord( 4, 3 )] = sigZ>>32; + sig128Z[indexWord( 4, 2 )] = sigZ; + if ( sigZ & UINT64_C( 0x8000000000000000 ) ) { + signZ = ! 
signZ; + softfloat_negX128M( sig128Z ); + } + } else { + softfloat_sub128M( sig128Z, sig128C, sig128Z ); + if ( 1 < expDiff ) { + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + if ( ! (sigZ & UINT64_C( 0x4000000000000000 )) ) { + --expZ; + sigZ <<= 1; + } + goto sigZ; + } + } + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + shiftDist = 0; + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + if ( ! sigZ ) { + shiftDist = 64; + sigZ = + (uint64_t) sig128Z[indexWord( 4, 1 )]<<32 + | sig128Z[indexWord( 4, 0 )]; + } + shiftDist += softfloat_countLeadingZeros64( sigZ ) - 1; + if ( shiftDist ) { + expZ -= shiftDist; + softfloat_shiftLeft128M( sig128Z, shiftDist, sig128Z ); + sigZ = + (uint64_t) sig128Z[indexWord( 4, 3 )]<<32 + | sig128Z[indexWord( 4, 2 )]; + } + } + sigZ: + if ( sig128Z[indexWord( 4, 1 )] || sig128Z[indexWord( 4, 0 )] ) sigZ |= 1; + roundPack: + return softfloat_roundPackToF64( signZ, expZ - 1, sigZ ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN_ABC: + uiZ = softfloat_propagateNaNF64UI( uiA, uiB ); + goto propagateNaN_ZC; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + infProdArg: + if ( magBits ) { + uiZ = packToF64UI( signZ, 0x7FF, 0 ); + if ( expC != 0x7FF ) goto uiZ; + if ( sigC ) goto propagateNaN_ZC; + if ( signZ == signC ) goto uiZ; + } + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + propagateNaN_ZC: + uiZ = softfloat_propagateNaNF64UI( uiZ, uiC ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + zeroProd: + uiZ = uiC; + if ( ! (expC | sigC) && (signZ != signC) ) { + completeCancellation: + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + } + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_negXM.c b/vendor/riscv-isa-sim/softfloat/s_negXM.c new file mode 100644 index 00000000..76f110c1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_negXM.c @@ -0,0 +1,63 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_negXM + +void softfloat_negXM( uint_fast8_t size_words, uint32_t *zPtr ) +{ + unsigned int index, lastIndex; + uint_fast8_t carry; + uint32_t word; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + carry = 1; + for (;;) { + word = ~zPtr[index] + carry; + zPtr[index] = word; + if ( index == lastIndex ) break; + index += wordIncr; + if ( word ) carry = 0; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c new file mode 100644 index 00000000..148cb2c8 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF128.c @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" + +float128_t + softfloat_normRoundPackToF128( + bool sign, int_fast32_t exp, uint_fast64_t sig64, uint_fast64_t sig0 ) +{ + int_fast8_t shiftDist; + struct uint128 sig128; + union ui128_f128 uZ; + uint_fast64_t sigExtra; + struct uint128_extra sig128Extra; + + if ( ! sig64 ) { + exp -= 64; + sig64 = sig0; + sig0 = 0; + } + shiftDist = softfloat_countLeadingZeros64( sig64 ) - 15; + exp -= shiftDist; + if ( 0 <= shiftDist ) { + if ( shiftDist ) { + sig128 = softfloat_shortShiftLeft128( sig64, sig0, shiftDist ); + sig64 = sig128.v64; + sig0 = sig128.v0; + } + if ( (uint32_t) exp < 0x7FFD ) { + uZ.ui.v64 = packToF128UI64( sign, sig64 | sig0 ? exp : 0, sig64 ); + uZ.ui.v0 = sig0; + return uZ.f; + } + sigExtra = 0; + } else { + sig128Extra = + softfloat_shortShiftRightJam128Extra( sig64, sig0, 0, -shiftDist ); + sig64 = sig128Extra.v.v64; + sig0 = sig128Extra.v.v0; + sigExtra = sig128Extra.extra; + } + return softfloat_roundPackToF128( sign, exp, sig64, sig0, sigExtra ); + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c new file mode 100644 index 00000000..6788f2e1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_normRoundPackToF16.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" + +float16_t + softfloat_normRoundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig ) +{ + int_fast8_t shiftDist; + union ui16_f16 uZ; + + shiftDist = softfloat_countLeadingZeros16( sig ) - 1; + exp -= shiftDist; + if ( (4 <= shiftDist) && ((unsigned int) exp < 0x1D) ) { + uZ.ui = packToF16UI( sign, sig ? exp : 0, sig<<(shiftDist - 4) ); + return uZ.f; + } else { + return softfloat_roundPackToF16( sign, exp, sig< +#include +#include "platform.h" +#include "internals.h" + +float32_t + softfloat_normRoundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + int_fast8_t shiftDist; + union ui32_f32 uZ; + + shiftDist = softfloat_countLeadingZeros32( sig ) - 1; + exp -= shiftDist; + if ( (7 <= shiftDist) && ((unsigned int) exp < 0xFD) ) { + uZ.ui = packToF32UI( sign, sig ? exp : 0, sig<<(shiftDist - 7) ); + return uZ.f; + } else { + return softfloat_roundPackToF32( sign, exp, sig< +#include +#include "platform.h" +#include "internals.h" + +float64_t + softfloat_normRoundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + int_fast8_t shiftDist; + union ui64_f64 uZ; + + shiftDist = softfloat_countLeadingZeros64( sig ) - 1; + exp -= shiftDist; + if ( (10 <= shiftDist) && ((unsigned int) exp < 0x7FD) ) { + uZ.ui = packToF64UI( sign, sig ? exp : 0, sig<<(shiftDist - 10) ); + return uZ.f; + } else { + return softfloat_roundPackToF64( sign, exp, sig< +#include "platform.h" +#include "internals.h" + +struct exp32_sig128 + softfloat_normSubnormalF128Sig( uint_fast64_t sig64, uint_fast64_t sig0 ) +{ + int_fast8_t shiftDist; + struct exp32_sig128 z; + + if ( ! sig64 ) { + shiftDist = softfloat_countLeadingZeros64( sig0 ) - 15; + z.exp = -63 - shiftDist; + if ( shiftDist < 0 ) { + z.sig.v64 = sig0>>-shiftDist; + z.sig.v0 = sig0<<(shiftDist & 63); + } else { + z.sig.v64 = sig0< +#include "platform.h" +#include "internals.h" + +struct exp8_sig16 softfloat_normSubnormalF16Sig( uint_fast16_t sig ) +{ + int_fast8_t shiftDist; + struct exp8_sig16 z; + + shiftDist = softfloat_countLeadingZeros16( sig ) - 5; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "internals.h" + +struct exp16_sig32 softfloat_normSubnormalF32Sig( uint_fast32_t sig ) +{ + int_fast8_t shiftDist; + struct exp16_sig32 z; + + shiftDist = softfloat_countLeadingZeros32( sig ) - 8; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "internals.h" + +struct exp16_sig64 softfloat_normSubnormalF64Sig( uint_fast64_t sig ) +{ + int_fast8_t shiftDist; + struct exp16_sig64 z; + + shiftDist = softfloat_countLeadingZeros64( sig ) - 11; + z.exp = 1 - shiftDist; + z.sig = sig< +#include "platform.h" +#include "primitiveTypes.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting the unsigned integer formed from concatenating `uiA64' and +| `uiA0' as a 128-bit floating-point value, and likewise interpreting the +| unsigned integer formed from concatenating `uiB64' and `uiB0' as another +| 128-bit floating-point value, and assuming at least on of these floating- +| point values is a NaN, returns the bit pattern of the combined NaN result. +| If either original floating-point value is a signaling NaN, the invalid +| exception is raised. 
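+| [Editor's note -- not part of the upstream SoftFloat sources: as the body
+| below shows, this specialization does not propagate either operand's NaN
+| payload.  It raises the invalid flag if either input is a signaling NaN
+| and always returns the canonical default NaN formed from
+| `defaultNaNF128UI64' and `defaultNaNF128UI0'.]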
+*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_propagateNaNF128UI( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0 + ) +{ + struct uint128 uiZ; + + if ( + softfloat_isSigNaNF128UI( uiA64, uiA0 ) + || softfloat_isSigNaNF128UI( uiB64, uiB0 ) + ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + return uiZ; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c new file mode 100644 index 00000000..3ecd4c98 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF16UI.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 16-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. 
+*----------------------------------------------------------------------------*/ +uint_fast16_t + softfloat_propagateNaNF16UI( uint_fast16_t uiA, uint_fast16_t uiB ) +{ + + if ( softfloat_isSigNaNF16UI( uiA ) || softfloat_isSigNaNF16UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return defaultNaNF16UI; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c new file mode 100644 index 00000000..b97fa414 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF32UI.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. 
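+| [Editor's note -- not part of the upstream SoftFloat sources: a 32-bit
+| pattern is a signaling NaN when its exponent field is all ones, the
+| most-significant fraction bit is clear, and the remaining fraction bits
+| are not all zero; 0x7F800001 is one such pattern, and feeding it here
+| raises the invalid flag before the default NaN is returned.]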
+*----------------------------------------------------------------------------*/ +uint_fast32_t + softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + + if ( softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return defaultNaNF32UI; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c new file mode 100644 index 00000000..9c2d3598 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_propagateNaNF64UI.c @@ -0,0 +1,58 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "specialize.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. 
+*----------------------------------------------------------------------------*/ +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ) +{ + + if ( softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) ) { + softfloat_raiseFlags( softfloat_flag_invalid ); + } + return defaultNaNF64UI; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c b/vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c new file mode 100644 index 00000000..fe787a43 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_remStepMBy32.c @@ -0,0 +1,86 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_remStepMBy32 + +void + softfloat_remStepMBy32( + uint_fast8_t size_words, + const uint32_t *remPtr, + uint_fast8_t dist, + const uint32_t *bPtr, + uint32_t q, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint64_t dwordProd; + uint32_t wordRem, wordShiftedRem, wordProd; + uint_fast8_t uNegDist, borrow; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + dwordProd = (uint64_t) bPtr[index] * q; + wordRem = remPtr[index]; + wordShiftedRem = wordRem<>(uNegDist & 31); + index += wordIncr; + dwordProd = (uint64_t) bPtr[index] * q + (dwordProd>>32); + wordRem = remPtr[index]; + wordShiftedRem |= wordRem< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundMToI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + union { uint64_t ui; int64_t i; } uZ; + int64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c new file mode 100644 index 00000000..0377c5bb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundMToUI64.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundMToUI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c new file mode 100644 index 00000000..4d5efbb7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackMToI64.c @@ -0,0 +1,88 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundPackMToI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + union { uint64_t ui; int64_t i; } uZ; + int64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c new file mode 100644 index 00000000..1a64fdf9 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackMToUI64.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundPackMToUI64( + bool sign, uint32_t *extSigPtr, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint32_t sigExtra; + bool doIncrement; + uint64_t sig; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + sigExtra = extSigPtr[indexWordLo( 3 )]; + doIncrement = (0x80000000 <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + sig = + (uint64_t) extSigPtr[indexWord( 3, 2 )]<<32 + | extSigPtr[indexWord( 3, 1 )]; + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + if ( ! (sigExtra & 0x7FFFFFFF) && roundNearEven ) sig &= ~1; + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c new file mode 100644 index 00000000..eaaa375c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF128.c @@ -0,0 +1,171 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t + softfloat_roundPackToF128( + bool sign, + int_fast32_t exp, + uint_fast64_t sig64, + uint_fast64_t sig0, + uint_fast64_t sigExtra + ) +{ + uint_fast8_t roundingMode; + bool roundNearEven, doIncrement, isTiny; + struct uint128_extra sig128Extra; + uint_fast64_t uiZ64, uiZ0; + struct uint128 sig128; + union ui128_f128 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x7FFD <= (uint32_t) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess + == softfloat_tininess_beforeRounding) + || (exp < -1) + || ! doIncrement + || softfloat_lt128( + sig64, + sig0, + UINT64_C( 0x0001FFFFFFFFFFFF ), + UINT64_C( 0xFFFFFFFFFFFFFFFF ) + ); + sig128Extra = + softfloat_shiftRightJam128Extra( sig64, sig0, sigExtra, -exp ); + sig64 = sig128Extra.v.v64; + sig0 = sig128Extra.v.v0; + sigExtra = sig128Extra.extra; + exp = 0; + if ( isTiny && sigExtra ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( + ! roundNearEven + && (roundingMode != softfloat_round_near_maxMag) + ) { + doIncrement = + (roundingMode + == (sign ? 
softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + } else if ( + (0x7FFD < exp) + || ((exp == 0x7FFD) + && softfloat_eq128( + sig64, + sig0, + UINT64_C( 0x0001FFFFFFFFFFFF ), + UINT64_C( 0xFFFFFFFFFFFFFFFF ) + ) + && doIncrement) + ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + if ( + roundNearEven + || (roundingMode == softfloat_round_near_maxMag) + || (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ) { + uiZ64 = packToF128UI64( sign, 0x7FFF, 0 ); + uiZ0 = 0; + } else { + uiZ64 = + packToF128UI64( + sign, 0x7FFE, UINT64_C( 0x0000FFFFFFFFFFFF ) ); + uiZ0 = UINT64_C( 0xFFFFFFFFFFFFFFFF ); + } + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig0 |= 1; + goto packReturn; + } +#endif + } + if ( doIncrement ) { + sig128 = softfloat_add128( sig64, sig0, 0, 1 ); + sig64 = sig128.v64; + sig0 = + sig128.v0 + & ~(uint64_t) + (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } else { + if ( ! (sig64 | sig0) ) exp = 0; + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ64 = packToF128UI64( sign, exp, sig64 ); + uiZ0 = sig0; + uiZ: + uZ.ui.v64 = uiZ64; + uZ.ui.v0 = uiZ0; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c new file mode 100644 index 00000000..0eaa73a5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF16.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t + softfloat_roundPackToF16( bool sign, int_fast16_t exp, uint_fast16_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + bool isTiny; + uint_fast16_t uiZ; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x8; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xF + : 0; + } + roundBits = sig & 0xF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x1D <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x8000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0xF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0x1D < exp) || (0x8000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF16UI( sign, 0x1F, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>4; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast16_t) (! (roundBits ^ 8) & roundNearEven); + if ( ! 
sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF16UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c new file mode 100644 index 00000000..cc345085 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF32.c @@ -0,0 +1,113 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t + softfloat_roundPackToF32( bool sign, int_fast16_t exp, uint_fast32_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + bool isTiny; + uint_fast32_t uiZ; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 
0x7F + : 0; + } + roundBits = sig & 0x7F; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0xFD <= (unsigned int) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) || (sig + roundIncrement < 0x80000000); + sig = softfloat_shiftRightJam32( sig, -exp ); + exp = 0; + roundBits = sig & 0x7F; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( (0xFD < exp) || (0x80000000 <= sig + roundIncrement) ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF32UI( sign, 0xFF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>7; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven); + if ( ! sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF32UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c new file mode 100644 index 00000000..aaff008c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToF64.c @@ -0,0 +1,117 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t + softfloat_roundPackToF64( bool sign, int_fast16_t exp, uint_fast64_t sig ) +{ + uint_fast8_t roundingMode; + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + bool isTiny; + uint_fast64_t uiZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundingMode = softfloat_roundingMode; + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x200; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x3FF + : 0; + } + roundBits = sig & 0x3FF; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + if ( 0x7FD <= (uint16_t) exp ) { + if ( exp < 0 ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + isTiny = + (softfloat_detectTininess == softfloat_tininess_beforeRounding) + || (exp < -1) + || (sig + roundIncrement < UINT64_C( 0x8000000000000000 )); + sig = softfloat_shiftRightJam64( sig, -exp ); + exp = 0; + roundBits = sig & 0x3FF; + if ( isTiny && roundBits ) { + softfloat_raiseFlags( softfloat_flag_underflow ); + } + } else if ( + (0x7FD < exp) + || (UINT64_C( 0x8000000000000000 ) <= sig + roundIncrement) + ) { + /*---------------------------------------------------------------- + *----------------------------------------------------------------*/ + softfloat_raiseFlags( + softfloat_flag_overflow | softfloat_flag_inexact ); + uiZ = packToF64UI( sign, 0x7FF, 0 ) - ! roundIncrement; + goto uiZ; + } + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + sig = (sig + roundIncrement)>>10; + if ( roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; +#ifdef SOFTFLOAT_ROUND_ODD + if ( roundingMode == softfloat_round_odd ) { + sig |= 1; + goto packReturn; + } +#endif + } + sig &= ~(uint_fast64_t) (! (roundBits ^ 0x200) & roundNearEven); + if ( ! 
sig ) exp = 0; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + packReturn: + uiZ = packToF64UI( sign, exp, sig ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c new file mode 100644 index 00000000..3ece8f05 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToI32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t + softfloat_roundPackToI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + uint_fast32_t sig32; + union { uint32_t ui; int32_t i; } uZ; + int_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x7F + : 0; + } + roundBits = sig & 0x7F; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid; + sig32 = sig>>7; + sig32 &= ~(uint_fast32_t) (! (roundBits ^ 0x40) & roundNearEven); + uZ.ui = sign ? 
-sig32 : sig32; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i32_fromNegOverflow : i32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c new file mode 100644 index 00000000..ebef7f36 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToI64.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundPackToI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + union { uint64_t ui; int64_t i; } uZ; + int_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! (sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + uZ.ui = sign ? 
-sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c new file mode 100644 index 00000000..f0021fe5 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI32.c @@ -0,0 +1,80 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + softfloat_roundPackToUI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast8_t roundIncrement, roundBits; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x40; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0x7F + : 0; + } + roundBits = sig & 0x7F; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFFF8000000000 ) ) goto invalid; + z = sig>>7; + z &= ~(uint_fast32_t) (! 
(roundBits ^ 0x40) & roundNearEven); + if ( sign && z ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c new file mode 100644 index 00000000..fada1840 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundPackToUI64.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3a+, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundPackToUI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! 
(sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToI32.c b/vendor/riscv-isa-sim/softfloat/s_roundToI32.c new file mode 100644 index 00000000..20a3ff4f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToI32.c @@ -0,0 +1,84 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast32_t + softfloat_roundToI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + uint_fast32_t sig32; + union { uint32_t ui; int32_t i; } uZ; + int_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x800; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xFFF + : 0; + } + roundBits = sig & 0xFFF; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid; + sig32 = sig>>12; + sig32 &= ~(uint_fast32_t) (! 
(roundBits ^ 0x800) & roundNearEven); + uZ.ui = sign ? -sig32 : sig32; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i32_fromNegOverflow : i32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToI64.c b/vendor/riscv-isa-sim/softfloat/s_roundToI64.c new file mode 100644 index 00000000..fcddbc27 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToI64.c @@ -0,0 +1,89 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +int_fast64_t + softfloat_roundToI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + union { uint64_t ui; int64_t i; } uZ; + int_fast64_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! 
(sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + uZ.ui = sign ? -sig : sig; + z = uZ.i; + if ( z && ((z < 0) ^ sign) ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? i64_fromNegOverflow : i64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToUI32.c b/vendor/riscv-isa-sim/softfloat/s_roundToUI32.c new file mode 100644 index 00000000..180899bd --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToUI32.c @@ -0,0 +1,80 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast32_t + softfloat_roundToUI32( + bool sign, uint_fast64_t sig, uint_fast8_t roundingMode, bool exact ) +{ + bool roundNearEven; + uint_fast16_t roundIncrement, roundBits; + uint_fast32_t z; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + roundIncrement = 0x800; + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + roundIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + ? 0xFFF + : 0; + } + roundBits = sig & 0xFFF; + sig += roundIncrement; + if ( sig & UINT64_C( 0xFFFFF00000000000 ) ) goto invalid; + z = sig>>12; + z &= ~(uint_fast32_t) (! 
(roundBits ^ 0x800) & roundNearEven); + if ( sign && z ) goto invalid; + if ( exact && roundBits ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return z; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui32_fromNegOverflow : ui32_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_roundToUI64.c b/vendor/riscv-isa-sim/softfloat/s_roundToUI64.c new file mode 100644 index 00000000..de35b5eb --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_roundToUI64.c @@ -0,0 +1,85 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +uint_fast64_t + softfloat_roundToUI64( + bool sign, + uint_fast64_t sig, + uint_fast64_t sigExtra, + uint_fast8_t roundingMode, + bool exact + ) +{ + bool roundNearEven, doIncrement; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + roundNearEven = (roundingMode == softfloat_round_near_even); + doIncrement = (UINT64_C( 0x8000000000000000 ) <= sigExtra); + if ( ! roundNearEven && (roundingMode != softfloat_round_near_maxMag) ) { + doIncrement = + (roundingMode + == (sign ? softfloat_round_min : softfloat_round_max)) + && sigExtra; + } + if ( doIncrement ) { + ++sig; + if ( ! sig ) goto invalid; + sig &= + ~(uint_fast64_t) + (! 
(sigExtra & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + & roundNearEven); + } + if ( sign && sig ) goto invalid; + if ( exact && sigExtra ) { + softfloat_exceptionFlags |= softfloat_flag_inexact; + } + return sig; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + invalid: + softfloat_raiseFlags( softfloat_flag_invalid ); + return sign ? ui64_fromNegOverflow : ui64_fromPosOverflow; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c new file mode 100644 index 00000000..8d2b91e8 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128.c @@ -0,0 +1,69 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shiftRightJam128 + +struct uint128 + softfloat_shiftRightJam128( uint64_t a64, uint64_t a0, uint_fast32_t dist ) +{ + uint_fast8_t u8NegDist; + struct uint128 z; + + if ( dist < 64 ) { + u8NegDist = -dist; + z.v64 = a64>>dist; + z.v0 = + a64<<(u8NegDist & 63) | a0>>dist + | ((uint64_t) (a0<<(u8NegDist & 63)) != 0); + } else { + z.v64 = 0; + z.v0 = + (dist < 127) + ? 
a64>>(dist & 63) + | (((a64 & (((uint_fast64_t) 1<<(dist & 63)) - 1)) | a0) + != 0) + : ((a64 | a0) != 0); + } + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c new file mode 100644 index 00000000..4e1293c7 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam128Extra.c @@ -0,0 +1,77 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shiftRightJam128Extra + +struct uint128_extra + softfloat_shiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast32_t dist ) +{ + uint_fast8_t u8NegDist; + struct uint128_extra z; + + u8NegDist = -dist; + if ( dist < 64 ) { + z.v.v64 = a64>>dist; + z.v.v0 = a64<<(u8NegDist & 63) | a0>>dist; + z.extra = a0<<(u8NegDist & 63); + } else { + z.v.v64 = 0; + if ( dist == 64 ) { + z.v.v0 = a64; + z.extra = a0; + } else { + extra |= a0; + if ( dist < 128 ) { + z.v.v0 = a64>>(dist & 63); + z.extra = a64<<(u8NegDist & 63); + } else { + z.v.v0 = 0; + z.extra = (dist == 128) ? a64 : (a64 != 0); + } + } + } + z.extra |= (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c new file mode 100644 index 00000000..04cd1e50 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam256M.c @@ -0,0 +1,126 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+
+Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of
+California.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+    this list of conditions, and the following disclaimer in the documentation
+    and/or other materials provided with the distribution.
+
+ 3. Neither the name of the University nor the names of its contributors may
+    be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+=============================================================================*/
+
+#include <stdint.h>
+#include "platform.h"
+#include "primitiveTypes.h"
+
+#ifndef softfloat_shiftRightJam256M
+
+static
+ void
+ softfloat_shortShiftRightJamM(
+     uint_fast8_t size_words,
+     const uint64_t *aPtr,
+     uint_fast8_t dist,
+     uint64_t *zPtr
+ )
+{
+    uint_fast8_t uNegDist;
+    unsigned int index, lastIndex;
+    uint64_t partWordZ, wordA;
+
+    uNegDist = -dist;
+    index = indexWordLo( size_words );
+    lastIndex = indexWordHi( size_words );
+    wordA = aPtr[index];
+    partWordZ = wordA>>dist;
+    if ( partWordZ<<dist != wordA ) partWordZ |= 1;
+    while ( index != lastIndex ) {
+        wordA = aPtr[index + wordIncr];
+        zPtr[index] = wordA<<(uNegDist & 63) | partWordZ;
+        index += wordIncr;
+        partWordZ = wordA>>dist;
+    }
+    zPtr[index] = partWordZ;
+
+}
+
+void
+ softfloat_shiftRightJam256M(
+     const uint64_t *aPtr, uint_fast32_t dist, uint64_t *zPtr )
+{
+    uint64_t wordJam;
+    uint_fast32_t wordDist;
+    uint64_t *ptr;
+    uint_fast8_t i, innerDist;
+
+    wordJam = 0;
+    wordDist = dist>>6;
+    if ( wordDist ) {
+        if ( 4 < wordDist ) wordDist = 4;
+        ptr = (uint64_t *) (aPtr + indexMultiwordLo( 4, wordDist ));
+        i = wordDist;
+        do {
+            wordJam = *ptr++;
+            if ( wordJam ) break;
+            --i;
+        } while ( i );
+        ptr = zPtr;
+    }
+    if ( wordDist < 4 ) {
+        aPtr += indexMultiwordHiBut( 4, wordDist );
+        innerDist = dist & 63;
+        if ( innerDist ) {
+            softfloat_shortShiftRightJamM(
+                4 - wordDist,
+                aPtr,
+                innerDist,
+                zPtr + indexMultiwordLoBut( 4, wordDist )
+            );
+            if ( !
wordDist ) goto wordJam; + } else { + aPtr += indexWordLo( 4 - wordDist ); + ptr = zPtr + indexWordLo( 4 ); + for ( i = 4 - wordDist; i; --i ) { + *ptr = *aPtr; + aPtr += wordIncr; + ptr += wordIncr; + } + } + ptr = zPtr + indexMultiwordHi( 4, wordDist ); + } + do { + *ptr++ = 0; + --wordDist; + } while ( wordDist ); + wordJam: + if ( wordJam ) zPtr[indexWordLo( 4 )] |= 1; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c new file mode 100644 index 00000000..fbc3aa01 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam32.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_shiftRightJam32 + +uint32_t softfloat_shiftRightJam32( uint32_t a, uint_fast16_t dist ) +{ + + return + (dist < 31) ? a>>dist | ((uint32_t) (a<<(-dist & 31)) != 0) : (a != 0); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c new file mode 100644 index 00000000..34edd7bf --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64.c @@ -0,0 +1,51 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_shiftRightJam64 + +uint64_t softfloat_shiftRightJam64( uint64_t a, uint_fast32_t dist ) +{ + + return + (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0) : (a != 0); + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c new file mode 100644 index 00000000..4d787122 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shiftRightJam64Extra.c @@ -0,0 +1,62 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shiftRightJam64Extra + +struct uint64_extra + softfloat_shiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast32_t dist ) +{ + struct uint64_extra z; + + if ( dist < 64 ) { + z.v = a>>dist; + z.extra = a<<(-dist & 63); + } else { + z.v = 0; + z.extra = (dist == 64) ? a : (a != 0); + } + z.extra |= (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c new file mode 100644 index 00000000..9b7c0672 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftLeft128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
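The shiftRightJam helpers above all follow the same pattern: any nonzero bits shifted out of the result are OR-ed back into its least significant bit, so that later rounding code can still tell the value was inexact. A minimal standalone sketch, not part of the vendored sources, exercising the 64-bit form under that assumption:

#include <assert.h>
#include <stdint.h>

/* Same expression as softfloat_shiftRightJam64 above: the "jam" keeps a
   sticky LSB whenever nonzero bits fall off the bottom. */
static uint64_t shiftRightJam64( uint64_t a, uint32_t dist )
{
    return (dist < 63) ? a>>dist | ((uint64_t) (a<<(-dist & 63)) != 0)
                       : (a != 0);
}

int main( void )
{
    assert( shiftRightJam64( 0x101, 4 ) == 0x11 );  /* low set bit becomes a sticky 1       */
    assert( shiftRightJam64( 0x100, 4 ) == 0x10 );  /* exact shift, nothing jammed          */
    assert( shiftRightJam64( 1, 200 )   == 1 );     /* oversized shift: only stickiness left */
    return 0;
}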
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftLeft128 + +struct uint128 + softfloat_shortShiftLeft128( uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + struct uint128 z; + + z.v64 = a64<>(-dist & 63); + z.v0 = a0< +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftLeft64To96M + +void + softfloat_shortShiftLeft64To96M( + uint64_t a, uint_fast8_t dist, uint32_t *zPtr ) +{ + + zPtr[indexWord( 3, 0 )] = (uint32_t) a<>= 32 - dist; + zPtr[indexWord( 3, 2 )] = a>>32; + zPtr[indexWord( 3, 1 )] = a; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c new file mode 100644 index 00000000..28c39bb2 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRight128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRight128 + +struct uint128 + softfloat_shortShiftRight128( uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + struct uint128 z; + + z.v64 = a64>>dist; + z.v0 = a64<<(-dist & 63) | a0>>dist; + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c new file mode 100644 index 00000000..309188c3 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightExtendM.c @@ -0,0 +1,73 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightExtendM + +void + softfloat_shortShiftRightExtendM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ) +{ + uint_fast8_t uNegDist; + unsigned int indexA, lastIndexA; + uint32_t partWordZ, wordA; + + uNegDist = -dist; + indexA = indexWordLo( size_words ); + lastIndexA = indexWordHi( size_words ); + zPtr += indexWordLo( size_words + 1 ); + partWordZ = 0; + for (;;) { + wordA = aPtr[indexA]; + *zPtr = wordA<<(uNegDist & 31) | partWordZ; + zPtr += wordIncr; + partWordZ = wordA>>dist; + if ( indexA == lastIndexA ) break; + indexA += wordIncr; + } + *zPtr = partWordZ; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c new file mode 100644 index 00000000..3eb0dd40 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightJam128 + +struct uint128 + softfloat_shortShiftRightJam128( + uint64_t a64, uint64_t a0, uint_fast8_t dist ) +{ + uint_fast8_t uNegDist; + struct uint128 z; + + uNegDist = -dist; + z.v64 = a64>>dist; + z.v0 = + a64<<(uNegDist & 63) | a0>>dist + | ((uint64_t) (a0<<(uNegDist & 63)) != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c new file mode 100644 index 00000000..13692a0d --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam128Extra.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightJam128Extra + +struct uint128_extra + softfloat_shortShiftRightJam128Extra( + uint64_t a64, uint64_t a0, uint64_t extra, uint_fast8_t dist ) +{ + uint_fast8_t uNegDist; + struct uint128_extra z; + + uNegDist = -dist; + z.v.v64 = a64>>dist; + z.v.v0 = a64<<(uNegDist & 63) | a0>>dist; + z.extra = a0<<(uNegDist & 63) | (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c new file mode 100644 index 00000000..7e93cd4f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightJam64.c @@ -0,0 +1,50 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. 
+ +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" + +#ifndef softfloat_shortShiftRightJam64 + +uint64_t softfloat_shortShiftRightJam64( uint64_t a, uint_fast8_t dist ) +{ + + return a>>dist | ((a & (((uint_fast64_t) 1< +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightJam64Extra + +struct uint64_extra + softfloat_shortShiftRightJam64Extra( + uint64_t a, uint64_t extra, uint_fast8_t dist ) +{ + struct uint64_extra z; + + z.v = a>>dist; + z.extra = a<<(-dist & 63) | (extra != 0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c new file mode 100644 index 00000000..308ad59c --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_shortShiftRightM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_shortShiftRightM + +void + softfloat_shortShiftRightM( + uint_fast8_t size_words, + const uint32_t *aPtr, + uint_fast8_t dist, + uint32_t *zPtr + ) +{ + uint_fast8_t uNegDist; + unsigned int index, lastIndex; + uint32_t partWordZ, wordA; + + uNegDist = -dist; + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + partWordZ = aPtr[index]>>dist; + while ( index != lastIndex ) { + wordA = aPtr[index + wordIncr]; + zPtr[index] = wordA<<(uNegDist & 31) | partWordZ; + index += wordIncr; + partWordZ = wordA>>dist; + } + zPtr[index] = partWordZ; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_sub128.c b/vendor/riscv-isa-sim/softfloat/s_sub128.c new file mode 100644 index 00000000..ed86e100 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_sub128.c @@ -0,0 +1,55 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_sub128 + +struct uint128 + softfloat_sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 ) +{ + struct uint128 z; + + z.v0 = a0 - b0; + z.v64 = a64 - b64 - (a0 < b0); + return z; + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_sub1XM.c b/vendor/riscv-isa-sim/softfloat/s_sub1XM.c new file mode 100644 index 00000000..73773e5b --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_sub1XM.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_sub1XM + +void softfloat_sub1XM( uint_fast8_t size_words, uint32_t *zPtr ) +{ + unsigned int index, lastIndex; + uint32_t wordA; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + for (;;) { + wordA = zPtr[index]; + zPtr[index] = wordA - 1; + if ( wordA || (index == lastIndex) ) break; + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_sub256M.c b/vendor/riscv-isa-sim/softfloat/s_sub256M.c new file mode 100644 index 00000000..c07b45ea --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_sub256M.c @@ -0,0 +1,65 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. 
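softfloat_sub128 above subtracts two 128-bit values held as pairs of 64-bit limbs; the borrow out of the low limb is just the comparison (a0 < b0). A small self-contained check, not part of the vendored diff, assuming a GCC/Clang-style compiler that provides unsigned __int128 as a reference:

#include <assert.h>
#include <stdint.h>

struct u128 { uint64_t v64, v0; };

/* Same two-limb subtraction as softfloat_sub128: borrow = (a0 < b0). */
static struct u128 sub128( uint64_t a64, uint64_t a0, uint64_t b64, uint64_t b0 )
{
    struct u128 z;
    z.v0  = a0 - b0;
    z.v64 = a64 - b64 - (a0 < b0);
    return z;
}

int main( void )
{
    unsigned __int128 a = ((unsigned __int128) 3 << 64) | 5;
    unsigned __int128 b = ((unsigned __int128) 1 << 64) | 7;  /* low limb forces a borrow */
    struct u128 z = sub128( 3, 5, 1, 7 );
    unsigned __int128 ref = a - b;
    assert( z.v0  == (uint64_t) ref );
    assert( z.v64 == (uint64_t) (ref >> 64) );
    return 0;
}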
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_sub256M + +void + softfloat_sub256M( + const uint64_t *aPtr, const uint64_t *bPtr, uint64_t *zPtr ) +{ + unsigned int index; + uint_fast8_t borrow; + uint64_t wordA, wordB; + + index = indexWordLo( 4 ); + borrow = 0; + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + zPtr[index] = wordA - wordB - borrow; + if ( index == indexWordHi( 4 ) ) break; + borrow = borrow ? (wordA <= wordB) : (wordA < wordB); + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_subM.c b/vendor/riscv-isa-sim/softfloat/s_subM.c new file mode 100644 index 00000000..003f699f --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subM.c @@ -0,0 +1,70 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "primitiveTypes.h" + +#ifndef softfloat_subM + +void + softfloat_subM( + uint_fast8_t size_words, + const uint32_t *aPtr, + const uint32_t *bPtr, + uint32_t *zPtr + ) +{ + unsigned int index, lastIndex; + uint_fast8_t borrow; + uint32_t wordA, wordB; + + index = indexWordLo( size_words ); + lastIndex = indexWordHi( size_words ); + borrow = 0; + for (;;) { + wordA = aPtr[index]; + wordB = bPtr[index]; + zPtr[index] = wordA - wordB - borrow; + if ( index == lastIndex ) break; + borrow = borrow ? (wordA <= wordB) : (wordA < wordB); + index += wordIncr; + } + +} + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF128.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF128.c new file mode 100644 index 00000000..c4264d54 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF128.c @@ -0,0 +1,139 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float128_t + softfloat_subMagsF128( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0, + bool signZ + ) +{ + int_fast32_t expA; + struct uint128 sigA; + int_fast32_t expB; + struct uint128 sigB, sigZ; + int_fast32_t expDiff, expZ; + struct uint128 uiZ; + union ui128_f128 uZ; + + expA = expF128UI64( uiA64 ); + sigA.v64 = fracF128UI64( uiA64 ); + sigA.v0 = uiA0; + expB = expF128UI64( uiB64 ); + sigB.v64 = fracF128UI64( uiB64 ); + sigB.v0 = uiB0; + sigA = softfloat_shortShiftLeft128( sigA.v64, sigA.v0, 4 ); + sigB = softfloat_shortShiftLeft128( sigB.v64, sigB.v0, 4 ); + expDiff = expA - expB; + if ( 0 < expDiff ) goto expABigger; + if ( expDiff < 0 ) goto expBBigger; + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 | sigB.v64 | sigB.v0 ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + goto uiZ; + } + expZ = expA; + if ( ! expZ ) expZ = 1; + if ( sigB.v64 < sigA.v64 ) goto aBigger; + if ( sigA.v64 < sigB.v64 ) goto bBigger; + if ( sigB.v0 < sigA.v0 ) goto aBigger; + if ( sigA.v0 < sigB.v0 ) goto bBigger; + uiZ.v64 = + packToF128UI64( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + uiZ.v0 = 0; + goto uiZ; + expBBigger: + if ( expB == 0x7FFF ) { + if ( sigB.v64 | sigB.v0 ) goto propagateNaN; + uiZ.v64 = packToF128UI64( signZ ^ 1, 0x7FFF, 0 ); + uiZ.v0 = 0; + goto uiZ; + } + if ( expA ) { + sigA.v64 |= UINT64_C( 0x0010000000000000 ); + } else { + ++expDiff; + if ( ! expDiff ) goto newlyAlignedBBigger; + } + sigA = softfloat_shiftRightJam128( sigA.v64, sigA.v0, -expDiff ); + newlyAlignedBBigger: + expZ = expB; + sigB.v64 |= UINT64_C( 0x0010000000000000 ); + bBigger: + signZ = ! signZ; + sigZ = softfloat_sub128( sigB.v64, sigB.v0, sigA.v64, sigA.v0 ); + goto normRoundPack; + expABigger: + if ( expA == 0x7FFF ) { + if ( sigA.v64 | sigA.v0 ) goto propagateNaN; + uiZ.v64 = uiA64; + uiZ.v0 = uiA0; + goto uiZ; + } + if ( expB ) { + sigB.v64 |= UINT64_C( 0x0010000000000000 ); + } else { + --expDiff; + if ( ! expDiff ) goto newlyAlignedABigger; + } + sigB = softfloat_shiftRightJam128( sigB.v64, sigB.v0, expDiff ); + newlyAlignedABigger: + expZ = expA; + sigA.v64 |= UINT64_C( 0x0010000000000000 ); + aBigger: + sigZ = softfloat_sub128( sigA.v64, sigA.v0, sigB.v64, sigB.v0 ); + normRoundPack: + return softfloat_normRoundPackToF128( signZ, expZ - 5, sigZ.v64, sigZ.v0 ); + propagateNaN: + uiZ = softfloat_propagateNaNF128UI( uiA64, uiA0, uiB64, uiB0 ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF16.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF16.c new file mode 100644 index 00000000..5ec579e8 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF16.c @@ -0,0 +1,187 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the +University of California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float16_t softfloat_subMagsF16( uint_fast16_t uiA, uint_fast16_t uiB ) +{ + int_fast8_t expA; + uint_fast16_t sigA; + int_fast8_t expB; + uint_fast16_t sigB; + int_fast8_t expDiff; + uint_fast16_t uiZ; + int_fast16_t sigDiff; + bool signZ; + int_fast8_t shiftDist, expZ; + uint_fast16_t sigZ, sigX, sigY; + uint_fast32_t sig32Z; + int_fast8_t roundingMode; + union ui16_f16 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF16UI( uiA ); + sigA = fracF16UI( uiA ); + expB = expF16UI( uiB ); + sigB = fracF16UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0x1F ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF16UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF16UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + signZ = signF16UI( uiA ); + if ( sigDiff < 0 ) { + signZ = ! signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros16( sigDiff ) - 5; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + sigZ = sigDiff<>16; + if ( sig32Z & 0xFFFF ) { + sigZ |= 1; + } else { + if ( ! 
(sigZ & 0xF) && ((unsigned int) expZ < 0x1E) ) { + sigZ >>= 4; + goto pack; + } + } + return softfloat_roundPackToF16( signZ, expZ, sigZ ); + } + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + propagateNaN: + uiZ = softfloat_propagateNaNF16UI( uiA, uiB ); + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + subEpsilon: + roundingMode = softfloat_roundingMode; + if ( roundingMode != softfloat_round_near_even ) { + if ( + (roundingMode == softfloat_round_minMag) + || (roundingMode + == (signF16UI( uiZ ) ? softfloat_round_max + : softfloat_round_min)) + ) { + --uiZ; + } +#ifdef SOFTFLOAT_ROUND_ODD + else if ( roundingMode == softfloat_round_odd ) { + uiZ = (uiZ - 1) | 1; + } +#endif + } + softfloat_exceptionFlags |= softfloat_flag_inexact; + goto uiZ; + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + pack: + uiZ = packToF16UI( signZ, expZ, sigZ ); + uiZ: + uZ.ui = uiZ; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/s_subMagsF32.c b/vendor/riscv-isa-sim/softfloat/s_subMagsF32.c new file mode 100644 index 00000000..86e89f2e --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/s_subMagsF32.c @@ -0,0 +1,143 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
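When the two magnitudes cancel exactly, the subMags routines above take the sign of the zero result from the rounding mode: packToF*UI( softfloat_roundingMode == softfloat_round_min, 0, 0 ), i.e. -0 only when rounding toward negative infinity. The same IEEE 754 rule can be observed with ordinary hardware floats; a small sketch, not part of the diff, assuming a C99 <fenv.h> environment on an IEEE 754 target:

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main( void )
{
    volatile double x = 1.5;   /* volatile: keep the subtraction at run time */
    fesetround( FE_TONEAREST );
    printf( "nearest:  signbit = %d\n", signbit( x - x ) != 0 );  /* 0 -> +0 */
    fesetround( FE_DOWNWARD );
    printf( "downward: signbit = %d\n", signbit( x - x ) != 0 );  /* 1 -> -0 */
    return 0;
}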
+ +=============================================================================*/ + +#include +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float32_t softfloat_subMagsF32( uint_fast32_t uiA, uint_fast32_t uiB ) +{ + int_fast16_t expA; + uint_fast32_t sigA; + int_fast16_t expB; + uint_fast32_t sigB; + int_fast16_t expDiff; + uint_fast32_t uiZ; + int_fast32_t sigDiff; + bool signZ; + int_fast8_t shiftDist; + int_fast16_t expZ; + uint_fast32_t sigX, sigY; + union ui32_f32 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF32UI( uiA ); + sigA = fracF32UI( uiA ); + expB = expF32UI( uiB ); + sigB = fracF32UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0xFF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF32UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF32UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + signZ = signF32UI( uiA ); + if ( sigDiff < 0 ) { + signZ = ! signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros32( sigDiff ) - 8; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF32UI( signZ, expZ, sigDiff< +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +float64_t + softfloat_subMagsF64( uint_fast64_t uiA, uint_fast64_t uiB, bool signZ ) +{ + int_fast16_t expA; + uint_fast64_t sigA; + int_fast16_t expB; + uint_fast64_t sigB; + int_fast16_t expDiff; + uint_fast64_t uiZ; + int_fast64_t sigDiff; + int_fast8_t shiftDist; + int_fast16_t expZ; + uint_fast64_t sigZ; + union ui64_f64 uZ; + + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expA = expF64UI( uiA ); + sigA = fracF64UI( uiA ); + expB = expF64UI( uiB ); + sigB = fracF64UI( uiB ); + /*------------------------------------------------------------------------ + *------------------------------------------------------------------------*/ + expDiff = expA - expB; + if ( ! expDiff ) { + /*-------------------------------------------------------------------- + *--------------------------------------------------------------------*/ + if ( expA == 0x7FF ) { + if ( sigA | sigB ) goto propagateNaN; + softfloat_raiseFlags( softfloat_flag_invalid ); + uiZ = defaultNaNF64UI; + goto uiZ; + } + sigDiff = sigA - sigB; + if ( ! sigDiff ) { + uiZ = + packToF64UI( + (softfloat_roundingMode == softfloat_round_min), 0, 0 ); + goto uiZ; + } + if ( expA ) --expA; + if ( sigDiff < 0 ) { + signZ = ! 
signZ; + sigDiff = -sigDiff; + } + shiftDist = softfloat_countLeadingZeros64( sigDiff ) - 11; + expZ = expA - shiftDist; + if ( expZ < 0 ) { + shiftDist = expA; + expZ = 0; + } + uiZ = packToF64UI( signZ, expZ, sigDiff< +#include +#include "softfloat_types.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/*---------------------------------------------------------------------------- +| Software floating-point underflow tininess-detection mode. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_detectTininess; +enum { + softfloat_tininess_beforeRounding = 0, + softfloat_tininess_afterRounding = 1 +}; + +/*---------------------------------------------------------------------------- +| Software floating-point rounding mode. (Mode "odd" is supported only if +| SoftFloat is compiled with macro 'SOFTFLOAT_ROUND_ODD' defined.) +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_roundingMode; +enum { + softfloat_round_near_even = 0, + softfloat_round_minMag = 1, + softfloat_round_min = 2, + softfloat_round_max = 3, + softfloat_round_near_maxMag = 4, + softfloat_round_odd = 5 +}; + +/*---------------------------------------------------------------------------- +| Software floating-point exception flags. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags; +enum { + softfloat_flag_inexact = 1, + softfloat_flag_underflow = 2, + softfloat_flag_overflow = 4, + softfloat_flag_infinite = 8, + softfloat_flag_invalid = 16 +}; + +/*---------------------------------------------------------------------------- +| Routine to raise any or all of the software floating-point exception flags. +*----------------------------------------------------------------------------*/ +void softfloat_raiseFlags( uint_fast8_t ); + +/*---------------------------------------------------------------------------- +| Integer-to-floating-point conversion routines. 
+*----------------------------------------------------------------------------*/ +float16_t ui32_to_f16( uint32_t ); +float32_t ui32_to_f32( uint32_t ); +float64_t ui32_to_f64( uint32_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t ui32_to_extF80( uint32_t ); +float128_t ui32_to_f128( uint32_t ); +#endif +void ui32_to_extF80M( uint32_t, extFloat80_t * ); +void ui32_to_f128M( uint32_t, float128_t * ); +float16_t ui64_to_f16( uint64_t ); +float32_t ui64_to_f32( uint64_t ); +float64_t ui64_to_f64( uint64_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t ui64_to_extF80( uint64_t ); +float128_t ui64_to_f128( uint64_t ); +#endif +void ui64_to_extF80M( uint64_t, extFloat80_t * ); +void ui64_to_f128M( uint64_t, float128_t * ); +float16_t i32_to_f16( int32_t ); +float32_t i32_to_f32( int32_t ); +float64_t i32_to_f64( int32_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t i32_to_extF80( int32_t ); +float128_t i32_to_f128( int32_t ); +#endif +void i32_to_extF80M( int32_t, extFloat80_t * ); +void i32_to_f128M( int32_t, float128_t * ); +float16_t i64_to_f16( int64_t ); +float32_t i64_to_f32( int64_t ); +float64_t i64_to_f64( int64_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t i64_to_extF80( int64_t ); +float128_t i64_to_f128( int64_t ); +#endif +void i64_to_extF80M( int64_t, extFloat80_t * ); +void i64_to_f128M( int64_t, float128_t * ); + +/*---------------------------------------------------------------------------- +| 16-bit (half-precision) floating-point operations. +*----------------------------------------------------------------------------*/ +uint_fast8_t f16_to_ui8( float16_t, uint_fast8_t, bool ); +uint_fast16_t f16_to_ui16( float16_t, uint_fast8_t, bool ); +uint_fast32_t f16_to_ui32( float16_t, uint_fast8_t, bool ); +uint_fast64_t f16_to_ui64( float16_t, uint_fast8_t, bool ); +int_fast8_t f16_to_i8( float16_t, uint_fast8_t, bool ); +int_fast16_t f16_to_i16( float16_t, uint_fast8_t, bool ); +int_fast32_t f16_to_i32( float16_t, uint_fast8_t, bool ); +int_fast64_t f16_to_i64( float16_t, uint_fast8_t, bool ); +uint_fast32_t f16_to_ui32_r_minMag( float16_t, bool ); +uint_fast64_t f16_to_ui64_r_minMag( float16_t, bool ); +int_fast32_t f16_to_i32_r_minMag( float16_t, bool ); +int_fast64_t f16_to_i64_r_minMag( float16_t, bool ); +float32_t f16_to_f32( float16_t ); +float64_t f16_to_f64( float16_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t f16_to_extF80( float16_t ); +float128_t f16_to_f128( float16_t ); +#endif +void f16_to_extF80M( float16_t, extFloat80_t * ); +void f16_to_f128M( float16_t, float128_t * ); +float16_t f16_roundToInt( float16_t, uint_fast8_t, bool ); +float16_t f16_add( float16_t, float16_t ); +float16_t f16_sub( float16_t, float16_t ); +float16_t f16_max( float16_t, float16_t ); +float16_t f16_min( float16_t, float16_t ); +float16_t f16_mul( float16_t, float16_t ); +float16_t f16_mulAdd( float16_t, float16_t, float16_t ); +float16_t f16_div( float16_t, float16_t ); +float16_t f16_rem( float16_t, float16_t ); +float16_t f16_sqrt( float16_t ); +bool f16_eq( float16_t, float16_t ); +bool f16_le( float16_t, float16_t ); +bool f16_lt( float16_t, float16_t ); +bool f16_eq_signaling( float16_t, float16_t ); +bool f16_le_quiet( float16_t, float16_t ); +bool f16_lt_quiet( float16_t, float16_t ); +bool f16_isSignalingNaN( float16_t ); +uint_fast16_t f16_classify( float16_t ); +float16_t f16_rsqrte7( float16_t ); +float16_t f16_recip7( float16_t ); + +/*---------------------------------------------------------------------------- +| 32-bit (single-precision) floating-point operations. 
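The half-precision entry points above follow the library-wide pattern: the rounding mode and the sticky exception flags are thread-local globals rather than arguments. A minimal, illustrative driver (not part of the vendored sources; it assumes a program linked against this library with a standard C environment) might look like:

    /* Illustrative sketch only; not part of the vendored SoftFloat sources. */
    #include <stdbool.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        softfloat_roundingMode   = softfloat_round_near_even;
        softfloat_exceptionFlags = 0;

        float16_t one   = i32_to_f16( 1 );
        float16_t three = i32_to_f16( 3 );
        float16_t q     = f16_div( one, three );   /* 1/3 is inexact in binary16 */

        bool inexact = (softfloat_exceptionFlags & softfloat_flag_inexact) != 0;
        printf( "result bits 0x%04X, inexact %d\n", (unsigned) q.v, (int) inexact );
        return 0;
    }

Because softfloat_raiseFlags ORs new exceptions into softfloat_exceptionFlags, the flags accumulate; callers typically clear the variable before the operation whose status they want to observe, as above.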
+*----------------------------------------------------------------------------*/ +uint_fast16_t f32_to_ui16( float32_t, uint_fast8_t, bool ); +uint_fast32_t f32_to_ui32( float32_t, uint_fast8_t, bool ); +uint_fast64_t f32_to_ui64( float32_t, uint_fast8_t, bool ); +int_fast16_t f32_to_i16( float32_t, uint_fast8_t, bool ); +int_fast32_t f32_to_i32( float32_t, uint_fast8_t, bool ); +int_fast64_t f32_to_i64( float32_t, uint_fast8_t, bool ); +uint_fast32_t f32_to_ui32_r_minMag( float32_t, bool ); +uint_fast64_t f32_to_ui64_r_minMag( float32_t, bool ); +int_fast32_t f32_to_i32_r_minMag( float32_t, bool ); +int_fast64_t f32_to_i64_r_minMag( float32_t, bool ); +float16_t f32_to_f16( float32_t ); +float64_t f32_to_f64( float32_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t f32_to_extF80( float32_t ); +float128_t f32_to_f128( float32_t ); +#endif +void f32_to_extF80M( float32_t, extFloat80_t * ); +void f32_to_f128M( float32_t, float128_t * ); +float32_t f32_roundToInt( float32_t, uint_fast8_t, bool ); +float32_t f32_add( float32_t, float32_t ); +float32_t f32_sub( float32_t, float32_t ); +float32_t f32_max( float32_t, float32_t ); +float32_t f32_min( float32_t, float32_t ); +float32_t f32_mul( float32_t, float32_t ); +float32_t f32_mulAdd( float32_t, float32_t, float32_t ); +float32_t f32_div( float32_t, float32_t ); +float32_t f32_rem( float32_t, float32_t ); +float32_t f32_sqrt( float32_t ); +bool f32_eq( float32_t, float32_t ); +bool f32_le( float32_t, float32_t ); +bool f32_lt( float32_t, float32_t ); +bool f32_eq_signaling( float32_t, float32_t ); +bool f32_le_quiet( float32_t, float32_t ); +bool f32_lt_quiet( float32_t, float32_t ); +bool f32_isSignalingNaN( float32_t ); +uint_fast16_t f32_classify( float32_t ); +float32_t f32_rsqrte7( float32_t ); +float32_t f32_recip7( float32_t ); + +/*---------------------------------------------------------------------------- +| 64-bit (double-precision) floating-point operations. 
+*----------------------------------------------------------------------------*/ +uint_fast32_t f64_to_ui32( float64_t, uint_fast8_t, bool ); +uint_fast64_t f64_to_ui64( float64_t, uint_fast8_t, bool ); +int_fast32_t f64_to_i32( float64_t, uint_fast8_t, bool ); +int_fast64_t f64_to_i64( float64_t, uint_fast8_t, bool ); +uint_fast32_t f64_to_ui32_r_minMag( float64_t, bool ); +uint_fast64_t f64_to_ui64_r_minMag( float64_t, bool ); +int_fast32_t f64_to_i32_r_minMag( float64_t, bool ); +int_fast64_t f64_to_i64_r_minMag( float64_t, bool ); +float16_t f64_to_f16( float64_t ); +float32_t f64_to_f32( float64_t ); +#ifdef SOFTFLOAT_FAST_INT64 +extFloat80_t f64_to_extF80( float64_t ); +float128_t f64_to_f128( float64_t ); +#endif +void f64_to_extF80M( float64_t, extFloat80_t * ); +void f64_to_f128M( float64_t, float128_t * ); +float64_t f64_roundToInt( float64_t, uint_fast8_t, bool ); +float64_t f64_add( float64_t, float64_t ); +float64_t f64_sub( float64_t, float64_t ); +float64_t f64_max( float64_t, float64_t ); +float64_t f64_min( float64_t, float64_t ); +float64_t f64_mul( float64_t, float64_t ); +float64_t f64_mulAdd( float64_t, float64_t, float64_t ); +float64_t f64_div( float64_t, float64_t ); +float64_t f64_rem( float64_t, float64_t ); +float64_t f64_sqrt( float64_t ); +bool f64_eq( float64_t, float64_t ); +bool f64_le( float64_t, float64_t ); +bool f64_lt( float64_t, float64_t ); +bool f64_eq_signaling( float64_t, float64_t ); +bool f64_le_quiet( float64_t, float64_t ); +bool f64_lt_quiet( float64_t, float64_t ); +bool f64_isSignalingNaN( float64_t ); +uint_fast16_t f64_classify( float64_t ); +float64_t f64_rsqrte7( float64_t ); +float64_t f64_recip7( float64_t ); + +/*---------------------------------------------------------------------------- +| Rounding precision for 80-bit extended double-precision floating-point. +| Valid values are 32, 64, and 80. +*----------------------------------------------------------------------------*/ +extern THREAD_LOCAL uint_fast8_t extF80_roundingPrecision; + +/*---------------------------------------------------------------------------- +| 80-bit extended double-precision floating-point operations. 
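Unlike the arithmetic operations, the float-to-integer conversions declared above take the rounding mode and an `exact' flag as explicit arguments instead of using the global mode. An illustrative sketch (again not part of the sources) contrasting round-to-nearest-even with round-toward-zero:

    /* Illustrative sketch only; not part of the vendored SoftFloat sources. */
    #include <stdbool.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float64_t seven = i32_to_f64( 7 );
        float64_t two   = i32_to_f64( 2 );
        float64_t x     = f64_div( seven, two );   /* 3.5, exactly representable */

        /* 3.5 is a tie: nearest-even picks 4, round-toward-zero truncates to 3.
           Passing exact=true also raises the inexact flag for both calls. */
        int_fast32_t nearest = f64_to_i32( x, softfloat_round_near_even, true );
        int_fast32_t toward0 = f64_to_i32( x, softfloat_round_minMag,    true );

        printf( "%ld %ld\n", (long) nearest, (long) toward0 );
        return 0;
    }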
+*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_INT64 +uint_fast32_t extF80_to_ui32( extFloat80_t, uint_fast8_t, bool ); +uint_fast64_t extF80_to_ui64( extFloat80_t, uint_fast8_t, bool ); +int_fast32_t extF80_to_i32( extFloat80_t, uint_fast8_t, bool ); +int_fast64_t extF80_to_i64( extFloat80_t, uint_fast8_t, bool ); +uint_fast32_t extF80_to_ui32_r_minMag( extFloat80_t, bool ); +uint_fast64_t extF80_to_ui64_r_minMag( extFloat80_t, bool ); +int_fast32_t extF80_to_i32_r_minMag( extFloat80_t, bool ); +int_fast64_t extF80_to_i64_r_minMag( extFloat80_t, bool ); +float16_t extF80_to_f16( extFloat80_t ); +float32_t extF80_to_f32( extFloat80_t ); +float64_t extF80_to_f64( extFloat80_t ); +float128_t extF80_to_f128( extFloat80_t ); +extFloat80_t extF80_roundToInt( extFloat80_t, uint_fast8_t, bool ); +extFloat80_t extF80_add( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_sub( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_mul( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_div( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_rem( extFloat80_t, extFloat80_t ); +extFloat80_t extF80_sqrt( extFloat80_t ); +bool extF80_eq( extFloat80_t, extFloat80_t ); +bool extF80_le( extFloat80_t, extFloat80_t ); +bool extF80_lt( extFloat80_t, extFloat80_t ); +bool extF80_eq_signaling( extFloat80_t, extFloat80_t ); +bool extF80_le_quiet( extFloat80_t, extFloat80_t ); +bool extF80_lt_quiet( extFloat80_t, extFloat80_t ); +bool extF80_isSignalingNaN( extFloat80_t ); +#endif +uint_fast32_t extF80M_to_ui32( const extFloat80_t *, uint_fast8_t, bool ); +uint_fast64_t extF80M_to_ui64( const extFloat80_t *, uint_fast8_t, bool ); +int_fast32_t extF80M_to_i32( const extFloat80_t *, uint_fast8_t, bool ); +int_fast64_t extF80M_to_i64( const extFloat80_t *, uint_fast8_t, bool ); +uint_fast32_t extF80M_to_ui32_r_minMag( const extFloat80_t *, bool ); +uint_fast64_t extF80M_to_ui64_r_minMag( const extFloat80_t *, bool ); +int_fast32_t extF80M_to_i32_r_minMag( const extFloat80_t *, bool ); +int_fast64_t extF80M_to_i64_r_minMag( const extFloat80_t *, bool ); +float16_t extF80M_to_f16( const extFloat80_t * ); +float32_t extF80M_to_f32( const extFloat80_t * ); +float64_t extF80M_to_f64( const extFloat80_t * ); +void extF80M_to_f128M( const extFloat80_t *, float128_t * ); +void + extF80M_roundToInt( + const extFloat80_t *, uint_fast8_t, bool, extFloat80_t * ); +void extF80M_add( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_sub( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_mul( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_div( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_rem( const extFloat80_t *, const extFloat80_t *, extFloat80_t * ); +void extF80M_sqrt( const extFloat80_t *, extFloat80_t * ); +bool extF80M_eq( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_le( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_lt( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_eq_signaling( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_le_quiet( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_lt_quiet( const extFloat80_t *, const extFloat80_t * ); +bool extF80M_isSignalingNaN( const extFloat80_t * ); + +/*---------------------------------------------------------------------------- +| 128-bit (quadruple-precision) floating-point operations. 
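The `M'-suffixed entry points above pass operands and results through pointers, so they remain available even when SOFTFLOAT_FAST_INT64 is not defined and the by-value 80-bit API is compiled out. An illustrative sketch using the 80-bit extended functions, under the same assumptions as the earlier examples:

    /* Illustrative sketch only; not part of the vendored SoftFloat sources. */
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        extFloat80_t a, b, sum;
        i32_to_extF80M( 6, &a );                   /* a = 6.0 */
        i32_to_extF80M( 7, &b );                   /* b = 7.0 */
        extF80M_add( &a, &b, &sum );               /* sum = 13.0, written in place */

        float64_t d = extF80M_to_f64( &sum );      /* expected bits 0x402A000000000000 */
        printf( "0x%016llX\n", (unsigned long long) d.v );
        return 0;
    }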
+*----------------------------------------------------------------------------*/ +#ifdef SOFTFLOAT_FAST_INT64 +uint_fast32_t f128_to_ui32( float128_t, uint_fast8_t, bool ); +uint_fast64_t f128_to_ui64( float128_t, uint_fast8_t, bool ); +int_fast32_t f128_to_i32( float128_t, uint_fast8_t, bool ); +int_fast64_t f128_to_i64( float128_t, uint_fast8_t, bool ); +uint_fast32_t f128_to_ui32_r_minMag( float128_t, bool ); +uint_fast64_t f128_to_ui64_r_minMag( float128_t, bool ); +int_fast32_t f128_to_i32_r_minMag( float128_t, bool ); +int_fast64_t f128_to_i64_r_minMag( float128_t, bool ); +float16_t f128_to_f16( float128_t ); +float32_t f128_to_f32( float128_t ); +float64_t f128_to_f64( float128_t ); +extFloat80_t f128_to_extF80( float128_t ); +float128_t f128_roundToInt( float128_t, uint_fast8_t, bool ); +float128_t f128_add( float128_t, float128_t ); +float128_t f128_sub( float128_t, float128_t ); +float128_t f128_mul( float128_t, float128_t ); +float128_t f128_mulAdd( float128_t, float128_t, float128_t ); +float128_t f128_div( float128_t, float128_t ); +float128_t f128_rem( float128_t, float128_t ); +float128_t f128_sqrt( float128_t ); +bool f128_eq( float128_t, float128_t ); +bool f128_le( float128_t, float128_t ); +bool f128_lt( float128_t, float128_t ); +bool f128_eq_signaling( float128_t, float128_t ); +bool f128_le_quiet( float128_t, float128_t ); +bool f128_lt_quiet( float128_t, float128_t ); +bool f128_isSignalingNaN( float128_t ); +uint_fast16_t f128_classify( float128_t ); +#endif +uint_fast32_t f128M_to_ui32( const float128_t *, uint_fast8_t, bool ); +uint_fast64_t f128M_to_ui64( const float128_t *, uint_fast8_t, bool ); +int_fast32_t f128M_to_i32( const float128_t *, uint_fast8_t, bool ); +int_fast64_t f128M_to_i64( const float128_t *, uint_fast8_t, bool ); +uint_fast32_t f128M_to_ui32_r_minMag( const float128_t *, bool ); +uint_fast64_t f128M_to_ui64_r_minMag( const float128_t *, bool ); +int_fast32_t f128M_to_i32_r_minMag( const float128_t *, bool ); +int_fast64_t f128M_to_i64_r_minMag( const float128_t *, bool ); +float16_t f128M_to_f16( const float128_t * ); +float32_t f128M_to_f32( const float128_t * ); +float64_t f128M_to_f64( const float128_t * ); +void f128M_to_extF80M( const float128_t *, extFloat80_t * ); +void f128M_roundToInt( const float128_t *, uint_fast8_t, bool, float128_t * ); +void f128M_add( const float128_t *, const float128_t *, float128_t * ); +void f128M_sub( const float128_t *, const float128_t *, float128_t * ); +void f128M_mul( const float128_t *, const float128_t *, float128_t * ); +void + f128M_mulAdd( + const float128_t *, const float128_t *, const float128_t *, float128_t * + ); +void f128M_div( const float128_t *, const float128_t *, float128_t * ); +void f128M_rem( const float128_t *, const float128_t *, float128_t * ); +void f128M_sqrt( const float128_t *, float128_t * ); +bool f128M_eq( const float128_t *, const float128_t * ); +bool f128M_le( const float128_t *, const float128_t * ); +bool f128M_lt( const float128_t *, const float128_t * ); +bool f128M_eq_signaling( const float128_t *, const float128_t * ); +bool f128M_le_quiet( const float128_t *, const float128_t * ); +bool f128M_lt_quiet( const float128_t *, const float128_t * ); +bool f128M_isSignalingNaN( const float128_t * ); + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat.mk.in b/vendor/riscv-isa-sim/softfloat/softfloat.mk.in new file mode 100644 index 00000000..a20ab7ee --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat.mk.in @@ -0,0 
+1,241 @@ +softfloat_subproject_deps = + +softfloat_hdrs = \ + internals.h \ + platform.h \ + primitives.h \ + primitiveTypes.h \ + softfloat.h \ + softfloat_types.h \ + specialize.h \ + +softfloat_c_srcs = \ + f128_add.c \ + f128_classify.c \ + f128_div.c \ + f128_eq.c \ + f128_eq_signaling.c \ + f128_isSignalingNaN.c \ + f128_le.c \ + f128_le_quiet.c \ + f128_lt.c \ + f128_lt_quiet.c \ + f128_mulAdd.c \ + f128_mul.c \ + f128_rem.c \ + f128_roundToInt.c \ + f128_sqrt.c \ + f128_sub.c \ + f128_to_f16.c \ + f128_to_f32.c \ + f128_to_f64.c \ + f128_to_i32.c \ + f128_to_i32_r_minMag.c \ + f128_to_i64.c \ + f128_to_i64_r_minMag.c \ + f128_to_ui32.c \ + f128_to_ui32_r_minMag.c \ + f128_to_ui64.c \ + f128_to_ui64_r_minMag.c \ + f16_add.c \ + f16_classify.c \ + f16_div.c \ + f16_eq.c \ + f16_eq_signaling.c \ + f16_isSignalingNaN.c \ + f16_le.c \ + f16_le_quiet.c \ + f16_lt.c \ + f16_lt_quiet.c \ + f16_mulAdd.c \ + f16_mul.c \ + f16_rem.c \ + f16_roundToInt.c \ + f16_sqrt.c \ + f16_sub.c \ + f16_to_f128.c \ + f16_to_f32.c \ + f16_to_f64.c \ + f16_to_i8.c \ + f16_to_i16.c \ + f16_to_i32.c \ + f16_to_i32_r_minMag.c \ + f16_to_i64.c \ + f16_to_i64_r_minMag.c \ + f16_to_ui8.c \ + f16_to_ui16.c \ + f16_to_ui32.c \ + f16_to_ui32_r_minMag.c \ + f16_to_ui64.c \ + f16_to_ui64_r_minMag.c \ + f32_add.c \ + f32_classify.c \ + f32_div.c \ + f32_eq.c \ + f32_eq_signaling.c \ + f32_isSignalingNaN.c \ + f32_le.c \ + f32_le_quiet.c \ + f32_lt.c \ + f32_lt_quiet.c \ + f32_mulAdd.c \ + f32_mul.c \ + f32_rem.c \ + f32_roundToInt.c \ + f32_sqrt.c \ + f32_sub.c \ + f32_to_f128.c \ + f32_to_f16.c \ + f32_to_f64.c \ + f32_to_i16.c \ + f32_to_i32.c \ + f32_to_i32_r_minMag.c \ + f32_to_i64.c \ + f32_to_i64_r_minMag.c \ + f32_to_ui16.c \ + f32_to_ui32.c \ + f32_to_ui32_r_minMag.c \ + f32_to_ui64.c \ + f32_to_ui64_r_minMag.c \ + f64_add.c \ + f64_classify.c \ + f64_div.c \ + f64_eq.c \ + f64_eq_signaling.c \ + f64_isSignalingNaN.c \ + f64_le.c \ + f64_le_quiet.c \ + f64_lt.c \ + f64_lt_quiet.c \ + f64_mulAdd.c \ + f64_mul.c \ + f64_rem.c \ + f64_roundToInt.c \ + f64_sqrt.c \ + f64_sub.c \ + f64_to_f128.c \ + f64_to_f16.c \ + f64_to_f32.c \ + f64_to_i32.c \ + f64_to_i32_r_minMag.c \ + f64_to_i64.c \ + f64_to_i64_r_minMag.c \ + f64_to_ui32.c \ + f64_to_ui32_r_minMag.c \ + f64_to_ui64.c \ + f64_to_ui64_r_minMag.c \ + fall_maxmin.c \ + fall_reciprocal.c \ + i32_to_f128.c \ + i32_to_f16.c \ + i32_to_f32.c \ + i32_to_f64.c \ + i64_to_f128.c \ + i64_to_f16.c \ + i64_to_f32.c \ + i64_to_f64.c \ + s_add128.c \ + s_add256M.c \ + s_addCarryM.c \ + s_addComplCarryM.c \ + s_addMagsF128.c \ + s_addMagsF16.c \ + s_addMagsF32.c \ + s_addMagsF64.c \ + s_addM.c \ + s_approxRecip_1Ks.c \ + s_approxRecip32_1.c \ + s_approxRecipSqrt_1Ks.c \ + s_approxRecipSqrt32_1.c \ + s_commonNaNToF32UI.c \ + s_commonNaNToF64UI.c \ + s_compare128M.c \ + s_compare96M.c \ + s_countLeadingZeros16.c \ + s_countLeadingZeros32.c \ + s_countLeadingZeros64.c \ + s_countLeadingZeros8.c \ + s_eq128.c \ + s_f32UIToCommonNaN.c \ + s_f64UIToCommonNaN.c \ + s_le128.c \ + s_lt128.c \ + s_mul128By32.c \ + s_mul128MTo256M.c \ + s_mul128To256M.c \ + s_mul64ByShifted32To128.c \ + s_mul64To128.c \ + s_mul64To128M.c \ + s_mulAddF128.c \ + s_mulAddF16.c \ + s_mulAddF32.c \ + s_mulAddF64.c \ + s_negXM.c \ + s_normRoundPackToF128.c \ + s_normRoundPackToF16.c \ + s_normRoundPackToF32.c \ + s_normRoundPackToF64.c \ + s_normSubnormalF128Sig.c \ + s_normSubnormalF16Sig.c \ + s_normSubnormalF32Sig.c \ + s_normSubnormalF64Sig.c \ + softfloat_raiseFlags.c \ + softfloat_state.c \ + 
s_propagateNaNF16UI.c \ + s_propagateNaNF32UI.c \ + s_propagateNaNF64UI.c \ + s_propagateNaNF128UI.c \ + s_remStepMBy32.c \ + s_roundMToI64.c \ + s_roundMToUI64.c \ + s_roundPackMToI64.c \ + s_roundPackMToUI64.c \ + s_roundPackToF128.c \ + s_roundPackToF16.c \ + s_roundPackToF32.c \ + s_roundPackToF64.c \ + s_roundPackToI32.c \ + s_roundPackToI64.c \ + s_roundPackToUI32.c \ + s_roundPackToUI64.c \ + s_roundToI32.c \ + s_roundToI64.c \ + s_roundToUI32.c \ + s_roundToUI64.c \ + s_shiftRightJam128.c \ + s_shiftRightJam128Extra.c \ + s_shiftRightJam256M.c \ + s_shiftRightJam32.c \ + s_shiftRightJam64.c \ + s_shiftRightJam64Extra.c \ + s_shortShiftLeft128.c \ + s_shortShiftLeft64To96M.c \ + s_shortShiftRight128.c \ + s_shortShiftRightExtendM.c \ + s_shortShiftRightJam128.c \ + s_shortShiftRightJam128Extra.c \ + s_shortShiftRightJam64.c \ + s_shortShiftRightJam64Extra.c \ + s_shortShiftRightM.c \ + s_sub128.c \ + s_sub1XM.c \ + s_sub256M.c \ + s_subMagsF128.c \ + s_subMagsF16.c \ + s_subMagsF32.c \ + s_subMagsF64.c \ + s_subM.c \ + ui32_to_f128.c \ + ui32_to_f16.c \ + ui32_to_f32.c \ + ui32_to_f64.c \ + ui64_to_f128.c \ + ui64_to_f16.c \ + ui64_to_f32.c \ + ui64_to_f64.c \ + +softfloat_install_shared_lib = yes + +softfloat_test_srcs = + +softfloat_install_prog_srcs = diff --git a/vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c b/vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c new file mode 100644 index 00000000..f2c25ade --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat_raiseFlags.c @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014 The Regents of the University of California. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include "platform.h" +#include "softfloat.h" + +/*---------------------------------------------------------------------------- +| Raises the exceptions specified by `flags'. 
Floating-point traps can be +| defined here if desired. It is currently not possible for such a trap +| to substitute a result value. If traps are not implemented, this routine +| should be simply `softfloat_exceptionFlags |= flags;'. +*----------------------------------------------------------------------------*/ +void softfloat_raiseFlags( uint_fast8_t flags ) +{ + + softfloat_exceptionFlags |= flags; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat_state.c b/vendor/riscv-isa-sim/softfloat/softfloat_state.c new file mode 100644 index 00000000..a105e6f6 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat_state.c @@ -0,0 +1,52 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "specialize.h" +#include "softfloat.h" + +#ifndef THREAD_LOCAL +#define THREAD_LOCAL +#endif + +THREAD_LOCAL uint_fast8_t softfloat_roundingMode = softfloat_round_near_even; +THREAD_LOCAL uint_fast8_t softfloat_detectTininess = init_detectTininess; +THREAD_LOCAL uint_fast8_t softfloat_exceptionFlags = 0; + +THREAD_LOCAL uint_fast8_t extF80_roundingPrecision = 80; + diff --git a/vendor/riscv-isa-sim/softfloat/softfloat_types.h b/vendor/riscv-isa-sim/softfloat/softfloat_types.h new file mode 100644 index 00000000..af1888f9 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/softfloat_types.h @@ -0,0 +1,81 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2017 The Regents of the University of +California. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef softfloat_types_h +#define softfloat_types_h 1 + +#include + +/*---------------------------------------------------------------------------- +| Types used to pass 16-bit, 32-bit, 64-bit, and 128-bit floating-point +| arguments and results to/from functions. These types must be exactly +| 16 bits, 32 bits, 64 bits, and 128 bits in size, respectively. Where a +| platform has "native" support for IEEE-Standard floating-point formats, +| the types below may, if desired, be defined as aliases for the native types +| (typically 'float' and 'double', and possibly 'long double'). +*----------------------------------------------------------------------------*/ +typedef struct { uint16_t v; } float16_t; +typedef struct { uint32_t v; } float32_t; +typedef struct { uint64_t v; } float64_t; +typedef struct { uint64_t v[2]; } float128_t; + +/*---------------------------------------------------------------------------- +| The format of an 80-bit extended floating-point number in memory. This +| structure must contain a 16-bit field named 'signExp' and a 64-bit field +| named 'signif'. +*----------------------------------------------------------------------------*/ +#ifdef LITTLEENDIAN +struct extFloat80M { uint64_t signif; uint16_t signExp; }; +#else +struct extFloat80M { uint16_t signExp; uint64_t signif; }; +#endif + +/*---------------------------------------------------------------------------- +| The type used to pass 80-bit extended floating-point arguments and +| results to/from functions. This type must have size identical to +| 'struct extFloat80M'. Type 'extFloat80_t' can be defined as an alias for +| 'struct extFloat80M'. Alternatively, if a platform has "native" support +| for IEEE-Standard 80-bit extended floating-point, it may be possible, +| if desired, to define 'extFloat80_t' as an alias for the native type +| (presumably either 'long double' or a nonstandard compiler-intrinsic type). 
+| In that case, the 'signif' and 'signExp' fields of 'struct extFloat80M' +| must align exactly with the locations in memory of the sign, exponent, and +| significand of the native type. +*----------------------------------------------------------------------------*/ +typedef struct extFloat80M extFloat80_t; + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/specialize.h b/vendor/riscv-isa-sim/softfloat/specialize.h new file mode 100644 index 00000000..556476c1 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/specialize.h @@ -0,0 +1,429 @@ + +/*============================================================================ + +This C header file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#ifndef specialize_h +#define specialize_h 1 + +#include +#include +#include "primitiveTypes.h" +#include "softfloat.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*---------------------------------------------------------------------------- +| Default value for `softfloat_detectTininess'. +*----------------------------------------------------------------------------*/ +#define init_detectTininess softfloat_tininess_afterRounding + +/*---------------------------------------------------------------------------- +| The values to return on conversions to 32-bit integer formats that raise an +| invalid exception. 
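These constants, defined just after this comment, are the values the conversion routines hand back when the operand overflows or is a NaN; a NaN operand also raises the invalid flag. An illustrative sketch (not part of the sources) exercising the 32-bit case through the public API:

    /* Illustrative sketch only; not part of the vendored SoftFloat sources. */
    #include <stdbool.h>
    #include <stdio.h>
    #include "softfloat.h"

    int main(void)
    {
        float32_t zero = i32_to_f32( 0 );
        float32_t nan  = f32_div( zero, zero );    /* 0/0: default NaN, invalid raised */

        softfloat_exceptionFlags = 0;
        int_fast32_t z = f32_to_i32( nan, softfloat_round_near_even, false );
        bool invalid = (softfloat_exceptionFlags & softfloat_flag_invalid) != 0;

        /* prints 2147483647 (i32_fromNaN) and 1 */
        printf( "%ld invalid %d\n", (long) z, (int) invalid );
        return 0;
    }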
+*----------------------------------------------------------------------------*/ +#define ui8_fromPosOverflow 0xFF +#define ui8_fromNegOverflow 0 +#define ui8_fromNaN 0xFF +#define i8_fromPosOverflow 0x7F +#define i8_fromNegOverflow (-0x7F - 1) +#define i8_fromNaN 0x7F + +#define ui16_fromPosOverflow 0xFFFF +#define ui16_fromNegOverflow 0 +#define ui16_fromNaN 0xFFFF +#define i16_fromPosOverflow 0x7FFF +#define i16_fromNegOverflow (-0x7FFF - 1) +#define i16_fromNaN 0x7FFF + +#define ui32_fromPosOverflow 0xFFFFFFFF +#define ui32_fromNegOverflow 0 +#define ui32_fromNaN 0xFFFFFFFF +#define i32_fromPosOverflow 0x7FFFFFFF +#define i32_fromNegOverflow (-0x7FFFFFFF - 1) +#define i32_fromNaN 0x7FFFFFFF + +/*---------------------------------------------------------------------------- +| The values to return on conversions to 64-bit integer formats that raise an +| invalid exception. +*----------------------------------------------------------------------------*/ +#define ui64_fromPosOverflow UINT64_C( 0xFFFFFFFFFFFFFFFF ) +#define ui64_fromNegOverflow 0 +#define ui64_fromNaN UINT64_C( 0xFFFFFFFFFFFFFFFF ) +#define i64_fromPosOverflow UINT64_C( 0x7FFFFFFFFFFFFFFF ) +#define i64_fromNegOverflow (-UINT64_C( 0x7FFFFFFFFFFFFFFF ) - 1) +#define i64_fromNaN UINT64_C( 0x7FFFFFFFFFFFFFFF ) + +/*---------------------------------------------------------------------------- +| "Common NaN" structure, used to transfer NaN representations from one format +| to another. +*----------------------------------------------------------------------------*/ +struct commonNaN { char _unused; }; + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 16-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF16UI 0x7E00 + +/*---------------------------------------------------------------------------- +| Returns true when 16-bit unsigned integer `uiA' has the bit pattern of a +| 16-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF16UI( uiA ) ((((uiA) & 0x7E00) == 0x7C00) && ((uiA) & 0x01FF)) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 16-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f16UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x0200) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 16-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#define softfloat_commonNaNToF16UI( aPtr ) ((uint_fast16_t) defaultNaNF16UI) + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 16-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. 
If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast16_t + softfloat_propagateNaNF16UI( uint_fast16_t uiA, uint_fast16_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 32-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF32UI 0x7FC00000 + +/*---------------------------------------------------------------------------- +| Returns true when 32-bit unsigned integer `uiA' has the bit pattern of a +| 32-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF32UI( uiA ) ((((uiA) & 0x7FC00000) == 0x7F800000) && ((uiA) & 0x003FFFFF)) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 32-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f32UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & 0x00400000) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 32-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#define softfloat_commonNaNToF32UI( aPtr ) ((uint_fast32_t) defaultNaNF32UI) + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 32-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast32_t + softfloat_propagateNaNF32UI( uint_fast32_t uiA, uint_fast32_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 64-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF64UI UINT64_C( 0x7FF8000000000000 ) + +/*---------------------------------------------------------------------------- +| Returns true when 64-bit unsigned integer `uiA' has the bit pattern of a +| 64-bit floating-point signaling NaN. +| Note: This macro evaluates its argument more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF64UI( uiA ) ((((uiA) & UINT64_C( 0x7FF8000000000000 )) == UINT64_C( 0x7FF0000000000000 )) && ((uiA) & UINT64_C( 0x0007FFFFFFFFFFFF ))) + +/*---------------------------------------------------------------------------- +| Assuming `uiA' has the bit pattern of a 64-bit floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. 
If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f64UIToCommonNaN( uiA, zPtr ) if ( ! ((uiA) & UINT64_C( 0x0008000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 64-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#define softfloat_commonNaNToF64UI( aPtr ) ((uint_fast64_t) defaultNaNF64UI) + +/*---------------------------------------------------------------------------- +| Interpreting `uiA' and `uiB' as the bit patterns of two 64-bit floating- +| point values, at least one of which is a NaN, returns the bit pattern of +| the combined NaN result. If either `uiA' or `uiB' has the pattern of a +| signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +uint_fast64_t + softfloat_propagateNaNF64UI( uint_fast64_t uiA, uint_fast64_t uiB ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 80-bit extended floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNExtF80UI64 0x7FFF +#define defaultNaNExtF80UI0 UINT64_C( 0xC000000000000000 ) + +/*---------------------------------------------------------------------------- +| Returns true when the 80-bit unsigned integer formed from concatenating +| 16-bit `uiA64' and 64-bit `uiA0' has the bit pattern of an 80-bit extended +| floating-point signaling NaN. +| Note: This macro evaluates its arguments more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNExtF80UI( uiA64, uiA0 ) ((((uiA64) & 0x7FFF) == 0x7FFF) && ! ((uiA0) & UINT64_C( 0x4000000000000000 )) && ((uiA0) & UINT64_C( 0x3FFFFFFFFFFFFFFF ))) + +#ifdef SOFTFLOAT_FAST_INT64 + +/*---------------------------------------------------------------------------- +| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is +| defined. +*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| Assuming the unsigned integer formed from concatenating `uiA64' and `uiA0' +| has the bit pattern of an 80-bit extended floating-point NaN, converts +| this NaN to the common NaN form, and stores the resulting common NaN at the +| location pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_extF80UIToCommonNaN( uiA64, uiA0, zPtr ) if ( ! ((uiA0) & UINT64_C( 0x4000000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into an 80-bit extended +| floating-point NaN, and returns the bit pattern of this value as an unsigned +| integer. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! 
defined softfloat_commonNaNToExtF80UI +INLINE +struct uint128 softfloat_commonNaNToExtF80UI( const struct commonNaN *aPtr ) +{ + struct uint128 uiZ; + uiZ.v64 = defaultNaNExtF80UI64; + uiZ.v0 = defaultNaNExtF80UI0; + return uiZ; +} +#else +struct uint128 softfloat_commonNaNToExtF80UI( const struct commonNaN *aPtr ); +#endif + +/*---------------------------------------------------------------------------- +| Interpreting the unsigned integer formed from concatenating `uiA64' and +| `uiA0' as an 80-bit extended floating-point value, and likewise interpreting +| the unsigned integer formed from concatenating `uiB64' and `uiB0' as another +| 80-bit extended floating-point value, and assuming at least on of these +| floating-point values is a NaN, returns the bit pattern of the combined NaN +| result. If either original floating-point value is a signaling NaN, the +| invalid exception is raised. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_propagateNaNExtF80UI( + uint_fast16_t uiA64, + uint_fast64_t uiA0, + uint_fast16_t uiB64, + uint_fast64_t uiB0 + ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 128-bit floating-point NaN. +*----------------------------------------------------------------------------*/ +#define defaultNaNF128UI64 UINT64_C( 0x7FFF800000000000 ) +#define defaultNaNF128UI0 UINT64_C( 0 ) + +/*---------------------------------------------------------------------------- +| Returns true when the 128-bit unsigned integer formed from concatenating +| 64-bit `uiA64' and 64-bit `uiA0' has the bit pattern of a 128-bit floating- +| point signaling NaN. +| Note: This macro evaluates its arguments more than once. +*----------------------------------------------------------------------------*/ +#define softfloat_isSigNaNF128UI( uiA64, uiA0 ) ((((uiA64) & UINT64_C( 0x7FFF800000000000 )) == UINT64_C( 0x7FFF000000000000 )) && ((uiA0) || ((uiA64) & UINT64_C( 0x00007FFFFFFFFFFF )))) + +/*---------------------------------------------------------------------------- +| Assuming the unsigned integer formed from concatenating `uiA64' and `uiA0' +| has the bit pattern of a 128-bit floating-point NaN, converts this NaN to +| the common NaN form, and stores the resulting common NaN at the location +| pointed to by `zPtr'. If the NaN is a signaling NaN, the invalid exception +| is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_f128UIToCommonNaN( uiA64, uiA0, zPtr ) if ( ! ((uiA64) & UINT64_C( 0x0000800000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point +| NaN, and returns the bit pattern of this value as an unsigned integer. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! 
defined softfloat_commonNaNToF128UI +INLINE +struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN *aPtr ) +{ + struct uint128 uiZ; + uiZ.v64 = defaultNaNF128UI64; + uiZ.v0 = defaultNaNF128UI0; + return uiZ; +} +#else +struct uint128 softfloat_commonNaNToF128UI( const struct commonNaN * ); +#endif + +/*---------------------------------------------------------------------------- +| Interpreting the unsigned integer formed from concatenating `uiA64' and +| `uiA0' as a 128-bit floating-point value, and likewise interpreting the +| unsigned integer formed from concatenating `uiB64' and `uiB0' as another +| 128-bit floating-point value, and assuming at least on of these floating- +| point values is a NaN, returns the bit pattern of the combined NaN result. +| If either original floating-point value is a signaling NaN, the invalid +| exception is raised. +*----------------------------------------------------------------------------*/ +struct uint128 + softfloat_propagateNaNF128UI( + uint_fast64_t uiA64, + uint_fast64_t uiA0, + uint_fast64_t uiB64, + uint_fast64_t uiB0 + ); + +#else + +/*---------------------------------------------------------------------------- +| The following functions are needed only when `SOFTFLOAT_FAST_INT64' is not +| defined. +*----------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- +| Assuming the 80-bit extended floating-point value pointed to by `aSPtr' is +| a NaN, converts this NaN to the common NaN form, and stores the resulting +| common NaN at the location pointed to by `zPtr'. If the NaN is a signaling +| NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +#define softfloat_extF80MToCommonNaN( aSPtr, zPtr ) if ( ! ((aSPtr)->signif & UINT64_C( 0x4000000000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into an 80-bit extended +| floating-point NaN, and stores this NaN at the location pointed to by +| `zSPtr'. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! defined softfloat_commonNaNToExtF80M +INLINE +void + softfloat_commonNaNToExtF80M( + const struct commonNaN *aPtr, struct extFloat80M *zSPtr ) +{ + zSPtr->signExp = defaultNaNExtF80UI64; + zSPtr->signif = defaultNaNExtF80UI0; +} +#else +void + softfloat_commonNaNToExtF80M( + const struct commonNaN *aPtr, struct extFloat80M *zSPtr ); +#endif + +/*---------------------------------------------------------------------------- +| Assuming at least one of the two 80-bit extended floating-point values +| pointed to by `aSPtr' and `bSPtr' is a NaN, stores the combined NaN result +| at the location pointed to by `zSPtr'. If either original floating-point +| value is a signaling NaN, the invalid exception is raised. +*----------------------------------------------------------------------------*/ +void + softfloat_propagateNaNExtF80M( + const struct extFloat80M *aSPtr, + const struct extFloat80M *bSPtr, + struct extFloat80M *zSPtr + ); + +/*---------------------------------------------------------------------------- +| The bit pattern for a default generated 128-bit floating-point NaN. 
+*----------------------------------------------------------------------------*/ +#define defaultNaNF128UI96 0x7FFF8000 +#define defaultNaNF128UI64 0 +#define defaultNaNF128UI32 0 +#define defaultNaNF128UI0 0 + +/*---------------------------------------------------------------------------- +| Assuming the 128-bit floating-point value pointed to by `aWPtr' is a NaN, +| converts this NaN to the common NaN form, and stores the resulting common +| NaN at the location pointed to by `zPtr'. If the NaN is a signaling NaN, +| the invalid exception is raised. Argument `aWPtr' points to an array of +| four 32-bit elements that concatenate in the platform's normal endian order +| to form a 128-bit floating-point value. +*----------------------------------------------------------------------------*/ +#define softfloat_f128MToCommonNaN( aWPtr, zPtr ) if ( ! ((aWPtr)[indexWordHi( 4 )] & UINT64_C( 0x0000800000000000 )) ) softfloat_raiseFlags( softfloat_flag_invalid ) + +/*---------------------------------------------------------------------------- +| Converts the common NaN pointed to by `aPtr' into a 128-bit floating-point +| NaN, and stores this NaN at the location pointed to by `zWPtr'. Argument +| `zWPtr' points to an array of four 32-bit elements that concatenate in the +| platform's normal endian order to form a 128-bit floating-point value. +*----------------------------------------------------------------------------*/ +#if defined INLINE && ! defined softfloat_commonNaNToF128M +INLINE +void + softfloat_commonNaNToF128M( const struct commonNaN *aPtr, uint32_t *zWPtr ) +{ + zWPtr[indexWord( 4, 3 )] = defaultNaNF128UI96; + zWPtr[indexWord( 4, 2 )] = defaultNaNF128UI64; + zWPtr[indexWord( 4, 1 )] = defaultNaNF128UI32; + zWPtr[indexWord( 4, 0 )] = defaultNaNF128UI0; +} +#else +void + softfloat_commonNaNToF128M( const struct commonNaN *aPtr, uint32_t *zWPtr ); +#endif + +/*---------------------------------------------------------------------------- +| Assuming at least one of the two 128-bit floating-point values pointed to by +| `aWPtr' and `bWPtr' is a NaN, stores the combined NaN result at the location +| pointed to by `zWPtr'. If either original floating-point value is a +| signaling NaN, the invalid exception is raised. Each of `aWPtr', `bWPtr', +| and `zWPtr' points to an array of four 32-bit elements that concatenate in +| the platform's normal endian order to form a 128-bit floating-point value. +*----------------------------------------------------------------------------*/ +void + softfloat_propagateNaNF128M( + const uint32_t *aWPtr, const uint32_t *bWPtr, uint32_t *zWPtr ); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/vendor/riscv-isa-sim/softfloat/ui32_to_f128.c b/vendor/riscv-isa-sim/softfloat/ui32_to_f128.c new file mode 100644 index 00000000..78d3eb64 --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/ui32_to_f128.c @@ -0,0 +1,60 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t ui32_to_f128( uint32_t a ) +{ + uint_fast64_t uiZ64; + int_fast8_t shiftDist; + union ui128_f128 uZ; + + uiZ64 = 0; + if ( a ) { + shiftDist = softfloat_countLeadingZeros32( a ) + 17; + uiZ64 = + packToF128UI64( + 0, 0x402E - shiftDist, (uint_fast64_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t ui32_to_f16( uint32_t a ) +{ + int_fast8_t shiftDist; + union ui16_f16 u; + uint_fast16_t sig; + + shiftDist = softfloat_countLeadingZeros32( a ) - 21; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF16UI( + 0, 0x18 - shiftDist, (uint_fast16_t) a<>(-shiftDist) | ((uint32_t) (a<<(shiftDist & 31)) != 0) + : (uint_fast16_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui32_to_f32( uint32_t a ) +{ + union ui32_f32 uZ; + + if ( ! a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & 0x80000000 ) { + return softfloat_roundPackToF32( 0, 0x9D, a>>1 | (a & 1) ); + } else { + return softfloat_normRoundPackToF32( 0, 0x9C, a ); + } + +} + diff --git a/vendor/riscv-isa-sim/softfloat/ui32_to_f64.c b/vendor/riscv-isa-sim/softfloat/ui32_to_f64.c new file mode 100644 index 00000000..5e5f843a --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/ui32_to_f64.c @@ -0,0 +1,59 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. 
Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui32_to_f64( uint32_t a ) +{ + uint_fast64_t uiZ; + int_fast8_t shiftDist; + union ui64_f64 uZ; + + if ( ! a ) { + uiZ = 0; + } else { + shiftDist = softfloat_countLeadingZeros32( a ) + 21; + uiZ = + packToF64UI( 0, 0x432 - shiftDist, (uint_fast64_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float128_t ui64_to_f128( uint64_t a ) +{ + uint_fast64_t uiZ64, uiZ0; + int_fast8_t shiftDist; + struct uint128 zSig; + union ui128_f128 uZ; + + if ( ! a ) { + uiZ64 = 0; + uiZ0 = 0; + } else { + shiftDist = softfloat_countLeadingZeros64( a ) + 49; + if ( 64 <= shiftDist ) { + zSig.v64 = a<<(shiftDist - 64); + zSig.v0 = 0; + } else { + zSig = softfloat_shortShiftLeft128( 0, a, shiftDist ); + } + uiZ64 = packToF128UI64( 0, 0x406E - shiftDist, zSig.v64 ); + uiZ0 = zSig.v0; + } + uZ.ui.v64 = uiZ64; + uZ.ui.v0 = uiZ0; + return uZ.f; + +} + diff --git a/vendor/riscv-isa-sim/softfloat/ui64_to_f16.c b/vendor/riscv-isa-sim/softfloat/ui64_to_f16.c new file mode 100644 index 00000000..ecca02bc --- /dev/null +++ b/vendor/riscv-isa-sim/softfloat/ui64_to_f16.c @@ -0,0 +1,64 @@ + +/*============================================================================ + +This C source file is part of the SoftFloat IEEE Floating-Point Arithmetic +Package, Release 3d, by John R. Hauser. + +Copyright 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of +California. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions, and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the University nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE +DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +=============================================================================*/ + +#include +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float16_t ui64_to_f16( uint64_t a ) +{ + int_fast8_t shiftDist; + union ui16_f16 u; + uint_fast16_t sig; + + shiftDist = softfloat_countLeadingZeros64( a ) - 53; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF16UI( + 0, 0x18 - shiftDist, (uint_fast16_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float32_t ui64_to_f32( uint64_t a ) +{ + int_fast8_t shiftDist; + union ui32_f32 u; + uint_fast32_t sig; + + shiftDist = softfloat_countLeadingZeros64( a ) - 40; + if ( 0 <= shiftDist ) { + u.ui = + a ? packToF32UI( + 0, 0x95 - shiftDist, (uint_fast32_t) a< +#include "platform.h" +#include "internals.h" +#include "softfloat.h" + +float64_t ui64_to_f64( uint64_t a ) +{ + union ui64_f64 uZ; + + if ( ! a ) { + uZ.ui = 0; + return uZ.f; + } + if ( a & UINT64_C( 0x8000000000000000 ) ) { + return + softfloat_roundPackToF64( + 0, 0x43D, softfloat_shortShiftRightJam64( a, 1 ) ); + } else { + return softfloat_normRoundPackToF64( 0, 0x43C, a ); + } + +} + diff --git a/vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc b/vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc new file mode 100644 index 00000000..c4fc840f --- /dev/null +++ b/vendor/riscv-isa-sim/spike_dasm/spike-dasm.cc @@ -0,0 +1,70 @@ +// See LICENSE for license details. + +// This little program finds occurrences of strings like +// DASM(ffabc013) +// in its input, then replaces them with the disassembly +// enclosed hexadecimal number, interpreted as a RISC-V +// instruction. 
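+//
+// For example (illustrative only), an input line containing
+//   DASM(00a50533)
+// would be echoed with that token replaced by the disassembly of the
+// 32-bit word 0x00a50533 (an "add a0, a0, a0" instruction); all other
+// text on the line is passed through unchanged.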
+ +#include "disasm.h" +#include "extension.h" +#include +#include +#include +#include +using namespace std; + +int main(int argc, char** argv) +{ + string s; + const char* isa = DEFAULT_ISA; + + std::function extension; + option_parser_t parser; +#ifdef HAVE_DLOPEN + parser.option(0, "extension", 1, [&](const char* s){extension = find_extension(s);}); +#endif + parser.option(0, "isa", 1, [&](const char* s){isa = s;}); + parser.parse(argv); + + isa_parser_t isa_parser(isa, DEFAULT_PRIV); + disassembler_t* disassembler = new disassembler_t(&isa_parser); + if (extension) { + for (auto disasm_insn : extension()->get_disasms()) { + disassembler->add_insn(disasm_insn); + } + } + + while (getline(cin, s)) + { + for (size_t pos = 0; (pos = s.find("DASM(", pos)) != string::npos; ) + { + size_t start = pos; + + pos += strlen("DASM("); + + if (s[pos] == '0' && (s[pos+1] == 'x' || s[pos+1] == 'X')) + pos += 2; + + if (!isxdigit(s[pos])) + continue; + + char* endp; + int64_t bits = strtoull(&s[pos], &endp, 16); + if (*endp != ')') + continue; + + size_t nbits = 4 * (endp - &s[pos]); + if (nbits < 64) + bits = bits << (64 - nbits) >> (64 - nbits); + + string dis = disassembler->disassemble(bits); + s = s.substr(0, start) + dis + s.substr(endp - &s[0] + 1); + pos = start + dis.length(); + } + + cout << s << '\n'; + } + + return 0; +} diff --git a/vendor/riscv-isa-sim/spike_dasm/spike_dasm.ac b/vendor/riscv-isa-sim/spike_dasm/spike_dasm.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in b/vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in new file mode 100644 index 00000000..0233e62e --- /dev/null +++ b/vendor/riscv-isa-sim/spike_dasm/spike_dasm.mk.in @@ -0,0 +1,10 @@ +spike_dasm_subproject_deps = \ + disasm \ + softfloat \ + $(if $(HAVE_DLOPEN),riscv,) \ + +spike_dasm_srcs = \ + spike_dasm_option_parser.cc \ + +spike_dasm_install_prog_srcs = \ + spike-dasm.cc \ diff --git a/vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc b/vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc new file mode 100644 index 00000000..72daec40 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_dasm/spike_dasm_option_parser.cc @@ -0,0 +1,51 @@ +// See LICENSE for license details. + +#include "option_parser.h" +#include +#include +#include +#include + +void option_parser_t::option(char c, const char* s, int arg, std::function action) +{ + opts.push_back(option_t(c, s, arg, action)); +} + +const char* const* option_parser_t::parse(const char* const* argv0) +{ + assert(argv0); + const char* const* argv = argv0 + 1; + for (const char* opt; (opt = *argv) != NULL && opt[0] == '-'; argv++) + { + bool found = false; + for (auto it = opts.begin(); !found && it != opts.end(); it++) + { + size_t slen = it->str ? strlen(it->str) : 0; + bool chr_match = opt[1] != '-' && it->chr && opt[1] == it->chr; + bool str_match = opt[1] == '-' && slen && strncmp(opt+2, it->str, slen) == 0; + if (chr_match || (str_match && (opt[2+slen] == '=' || opt[2+slen] == '\0'))) + { + const char* optarg = + chr_match ? (opt[2] ? &opt[2] : NULL) : + opt[2+slen] ? &opt[3+slen] : + it->arg ? 
*(++argv) : NULL; + if (optarg && !it->arg) + error("no argument allowed for option", *argv0, opt); + if (!optarg && it->arg) + error("argument required for option", *argv0, opt); + it->func(optarg); + found = true; + } + } + if (!found) + error("unrecognized option", *argv0, opt); + } + return argv; +} + +void option_parser_t::error(const char* msg, const char* argv0, const char* arg) +{ + fprintf(stderr, "%s: %s %s\n", argv0, msg, arg ? arg : ""); + if (helpmsg) helpmsg(); + exit(1); +} diff --git a/vendor/riscv-isa-sim/spike_main/spike-log-parser.cc b/vendor/riscv-isa-sim/spike_main/spike-log-parser.cc new file mode 100644 index 00000000..fab00f08 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/spike-log-parser.cc @@ -0,0 +1,61 @@ +// See LICENSE for license details. + +// This little program finds occurrences of strings like +// core 0: 0x000000008000c36c (0xfe843783) ld a5, -24(s0) +// in its inputs, then output the RISC-V instruction with the disassembly +// enclosed hexadecimal number. + +#include +#include +#include +#include +#include "fesvr/option_parser.h" + +#include "disasm.h" +#include "extension.h" + +using namespace std; + +int main(int argc, char** argv) +{ + string s; + const char* isa_string = DEFAULT_ISA; + + std::function extension; + option_parser_t parser; + parser.option(0, "extension", 1, [&](const char* s){extension = find_extension(s);}); + parser.option(0, "isa", 1, [&](const char* s){isa_string = s;}); + parser.parse(argv); + + isa_parser_t isa(isa_string, DEFAULT_PRIV); + processor_t p(&isa, DEFAULT_VARCH, 0, 0, false, nullptr, cerr); + if (extension) { + p.register_extension(extension()); + } + + std::regex reg("^core\\s+\\d+:\\s+0x[0-9a-f]+\\s+\\(0x([0-9a-f]+)\\)", std::regex_constants::icase); + std::smatch m; + std::ssub_match sm ; + + while (getline(cin,s)){ + if (regex_search(s, m, reg)){ + // the opcode string + string op = m[1].str(); + uint32_t bit_num = op.size() * 4; + uint64_t opcode = strtoull(op.c_str(), nullptr, 16); + + if (bit_num<64){ + opcode = opcode << (64-bit_num) >> (64-bit_num); + } + + const disasm_insn_t* disasm = p.get_disassembler()->lookup(opcode); + if (disasm) { + cout << disasm->get_name() << '\n'; + } else { + cout << "unknown_op\n"; + } + } + } + + return 0; +} diff --git a/vendor/riscv-isa-sim/spike_main/spike.cc b/vendor/riscv-isa-sim/spike_main/spike.cc new file mode 100644 index 00000000..55290452 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/spike.cc @@ -0,0 +1,530 @@ +// See LICENSE for license details. 
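+//
+// spike.cc is the command-line front end of the simulator: it parses the
+// host options described in help() below, builds the memory layout and
+// target configuration, instantiates sim_t (plus optional cache models,
+// debug module and remote-bitbang glue), and runs the simulation to
+// completion, returning the target's exit code.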
+ +#include "cfg.h" +#include "sim.h" +#include "mmu.h" +#include "remote_bitbang.h" +#include "cachesim.h" +#include "extension.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include "../VERSION" + +static void help(int exit_code = 1) +{ + fprintf(stderr, "Spike RISC-V ISA Simulator " SPIKE_VERSION "\n\n"); + fprintf(stderr, "usage: spike [host options] [target options]\n"); + fprintf(stderr, "Host Options:\n"); + fprintf(stderr, " -p Simulate processors [default 1]\n"); + fprintf(stderr, " -m Provide MiB of target memory [default 2048]\n"); + fprintf(stderr, " -m Provide memory regions of size m and n bytes\n"); + fprintf(stderr, " at base addresses a and b (with 4 KiB alignment)\n"); + fprintf(stderr, " -d Interactive debug mode\n"); + fprintf(stderr, " -g Track histogram of PCs\n"); + fprintf(stderr, " -l Generate a log of execution\n"); +#ifdef HAVE_BOOST_ASIO + fprintf(stderr, " -s Command I/O via socket (use with -d)\n"); +#endif + fprintf(stderr, " -h, --help Print this help message\n"); + fprintf(stderr, " -H Start halted, allowing a debugger to connect\n"); + fprintf(stderr, " --log= File name for option -l\n"); + fprintf(stderr, " --debug-cmd= Read commands from file (use with -d)\n"); + fprintf(stderr, " --isa= RISC-V ISA string [default %s]\n", DEFAULT_ISA); + fprintf(stderr, " --priv= RISC-V privilege modes supported [default %s]\n", DEFAULT_PRIV); + fprintf(stderr, " --varch= RISC-V Vector uArch string [default %s]\n", DEFAULT_VARCH); + fprintf(stderr, " --pc=
Override ELF entry point\n"); + fprintf(stderr, " --hartids= Explicitly specify hartids, default is 0,1,...\n"); + fprintf(stderr, " --ic=:: Instantiate a cache model with S sets,\n"); + fprintf(stderr, " --dc=:: W ways, and B-byte blocks (with S and\n"); + fprintf(stderr, " --l2=:: B both powers of 2).\n"); + fprintf(stderr, " --device= Attach MMIO plugin device from an --extlib library\n"); + fprintf(stderr, " P -- Name of the MMIO plugin\n"); + fprintf(stderr, " B -- Base memory address of the device\n"); + fprintf(stderr, " A -- String arguments to pass to the plugin\n"); + fprintf(stderr, " This flag can be used multiple times.\n"); + fprintf(stderr, " The extlib flag for the library must come first.\n"); + fprintf(stderr, " --log-cache-miss Generate a log of cache miss\n"); + fprintf(stderr, " --extension= Specify RoCC Extension\n"); + fprintf(stderr, " This flag can be used multiple times.\n"); + fprintf(stderr, " --extlib= Shared library to load\n"); + fprintf(stderr, " This flag can be used multiple times.\n"); + fprintf(stderr, " --rbb-port= Listen on for remote bitbang connection\n"); + fprintf(stderr, " --dump-dts Print device tree string and exit\n"); + fprintf(stderr, " --disable-dtb Don't write the device tree blob into memory\n"); + fprintf(stderr, " --kernel= Load kernel flat image into memory\n"); + fprintf(stderr, " --initrd= Load kernel initrd into memory\n"); + fprintf(stderr, " --bootargs= Provide custom bootargs for kernel [default: console=hvc0 earlycon=sbi]\n"); + fprintf(stderr, " --real-time-clint Increment clint time at real-time rate\n"); + fprintf(stderr, " --dm-progsize= Progsize for the debug module [default 2]\n"); + fprintf(stderr, " --dm-sba= Debug system bus access supports up to " + " wide accesses [default 0]\n"); + fprintf(stderr, " --dm-auth Debug module requires debugger to authenticate\n"); + fprintf(stderr, " --dmi-rti= Number of Run-Test/Idle cycles " + "required for a DMI access [default 0]\n"); + fprintf(stderr, " --dm-abstract-rti= Number of Run-Test/Idle cycles " + "required for an abstract command to execute [default 0]\n"); + fprintf(stderr, " --dm-no-hasel Debug module supports hasel\n"); + fprintf(stderr, " --dm-no-abstract-csr Debug module won't support abstract to authenticate\n"); + fprintf(stderr, " --dm-no-halt-groups Debug module won't support halt groups\n"); + fprintf(stderr, " --dm-no-impebreak Debug module won't support implicit ebreak in program buffer\n"); + fprintf(stderr, " --blocksz= Cache block size (B) for CMO operations(powers of 2) [default 64]\n"); + + exit(exit_code); +} + +static void suggest_help() +{ + fprintf(stderr, "Try 'spike --help' for more information.\n"); + exit(1); +} + +static bool check_file_exists(const char *fileName) +{ + std::ifstream infile(fileName); + return infile.good(); +} + +static std::ifstream::pos_type get_file_size(const char *filename) +{ + std::ifstream in(filename, std::ios::ate | std::ios::binary); + return in.tellg(); +} + +static void read_file_bytes(const char *filename,size_t fileoff, + mem_t* mem, size_t memoff, size_t read_sz) +{ + std::ifstream in(filename, std::ios::in | std::ios::binary); + in.seekg(fileoff, std::ios::beg); + + std::vector read_buf(read_sz, 0); + in.read(&read_buf[0], read_sz); + mem->store(memoff, read_sz, (uint8_t*)&read_buf[0]); +} + +bool sort_mem_region(const mem_cfg_t &a, const mem_cfg_t &b) +{ + if (a.base == b.base) + return (a.size < b.size); + else + return (a.base < b.base); +} + +void merge_overlapping_memory_regions(std::vector &mems) +{ + // 
check the user specified memory regions and merge the overlapping or + // eliminate the containing parts + assert(!mems.empty()); + + std::sort(mems.begin(), mems.end(), sort_mem_region); + for (auto it = mems.begin() + 1; it != mems.end(); ) { + reg_t start = prev(it)->base; + reg_t end = prev(it)->base + prev(it)->size; + reg_t start2 = it->base; + reg_t end2 = it->base + it->size; + + //contains -> remove + if (start2 >= start && end2 <= end) { + it = mems.erase(it); + //partial overlapped -> extend + } else if (start2 >= start && start2 < end) { + prev(it)->size = std::max(end, end2) - start; + it = mems.erase(it); + // no overlapping -> keep it + } else { + it++; + } + } +} + +static std::vector parse_mem_layout(const char* arg) +{ + std::vector res; + + // handle legacy mem argument + char* p; + auto mb = strtoull(arg, &p, 0); + if (*p == 0) { + reg_t size = reg_t(mb) << 20; + if (size != (size_t)size) + throw std::runtime_error("Size would overflow size_t"); + res.push_back(mem_cfg_t(reg_t(DRAM_BASE), size)); + return res; + } + + // handle base/size tuples + while (true) { + auto base = strtoull(arg, &p, 0); + if (!*p || *p != ':') + help(); + auto size = strtoull(p + 1, &p, 0); + + // page-align base and size + auto base0 = base, size0 = size; + size += base0 % PGSIZE; + base -= base0 % PGSIZE; + if (size % PGSIZE != 0) + size += PGSIZE - size % PGSIZE; + + if (base + size < base) + help(); + + if (size != size0) { + fprintf(stderr, "Warning: the memory at [0x%llX, 0x%llX] has been realigned\n" + "to the %ld KiB page size: [0x%llX, 0x%llX]\n", + base0, base0 + size0 - 1, long(PGSIZE / 1024), base, base + size - 1); + } + + res.push_back(mem_cfg_t(reg_t(base), reg_t(size))); + if (!*p) + break; + if (*p != ',') + help(); + arg = p + 1; + } + + merge_overlapping_memory_regions(res); + + return res; +} + +static std::vector> make_mems(const std::vector &layout) +{ + std::vector> mems; + mems.reserve(layout.size()); + for (const auto &cfg : layout) { + mems.push_back(std::make_pair(cfg.base, new mem_t(cfg.size))); + } + return mems; +} + +static unsigned long atoul_safe(const char* s) +{ + char* e; + auto res = strtoul(s, &e, 10); + if (*e) + help(); + return res; +} + +static unsigned long atoul_nonzero_safe(const char* s) +{ + auto res = atoul_safe(s); + if (!res) + help(); + return res; +} + +static std::vector parse_hartids(const char *s) +{ + std::string const str(s); + std::stringstream stream(str); + std::vector hartids; + + int n; + while (stream >> n) { + hartids.push_back(n); + if (stream.peek() == ',') stream.ignore(); + } + + return hartids; +} + +int main(int argc, char** argv) +{ + bool debug = false; + bool halted = false; + bool histogram = false; + bool log = false; + bool socket = false; // command line option -s + bool dump_dts = false; + bool dtb_enabled = true; + const char* kernel = NULL; + reg_t kernel_offset, kernel_size; + std::vector> plugin_devices; + std::unique_ptr ic; + std::unique_ptr dc; + std::unique_ptr l2; + bool log_cache = false; + bool log_commits = false; + const char *log_path = nullptr; + std::vector> extensions; + const char* initrd = NULL; + const char* dtb_file = NULL; + uint16_t rbb_port = 0; + bool use_rbb = false; + unsigned dmi_rti = 0; + reg_t blocksz = 64; + debug_module_config_t dm_config = { + .progbufsize = 2, + .max_sba_data_width = 0, + .require_authentication = false, + .abstract_rti = 0, + .support_hasel = true, + .support_abstract_csr_access = true, + .support_haltgroups = true, + .support_impebreak = true + }; + cfg_arg_t 
nprocs(1); + + cfg_t cfg(/*default_initrd_bounds=*/std::make_pair((reg_t)0, (reg_t)0), + /*default_bootargs=*/nullptr, + /*default_isa=*/DEFAULT_ISA, + /*default_priv=*/DEFAULT_PRIV, + /*default_varch=*/DEFAULT_VARCH, + /*default_mem_layout=*/parse_mem_layout("2048"), + /*default_hartids=*/std::vector(), + /*default_real_time_clint=*/false); + + auto const device_parser = [&plugin_devices](const char *s) { + const std::string str(s); + std::istringstream stream(str); + + // We are parsing a string like name,base,args. + + // Parse the name, which is simply all of the characters leading up to the + // first comma. The validity of the plugin name will be checked later. + std::string name; + std::getline(stream, name, ','); + if (name.empty()) { + throw std::runtime_error("Plugin name is empty."); + } + + // Parse the base address. First, get all of the characters up to the next + // comma (or up to the end of the string if there is no comma). Then try to + // parse that string as an integer according to the rules of strtoull. It + // could be in decimal, hex, or octal. Fail if we were able to parse a + // number but there were garbage characters after the valid number. We must + // consume the entire string between the commas. + std::string base_str; + std::getline(stream, base_str, ','); + if (base_str.empty()) { + throw std::runtime_error("Device base address is empty."); + } + char* end; + reg_t base = static_cast(strtoull(base_str.c_str(), &end, 0)); + if (end != &*base_str.cend()) { + throw std::runtime_error("Error parsing device base address."); + } + + // The remainder of the string is the arguments. We could use getline, but + // that could ignore newline characters in the arguments. That should be + // rare and discouraged, but handle it here anyway with this weird in_avail + // technique. The arguments are optional, so if there were no arguments + // specified we could end up with an empty string here. That's okay. + auto avail = stream.rdbuf()->in_avail(); + std::string args(avail, '\0'); + stream.readsome(&args[0], avail); + + plugin_devices.emplace_back(base, new mmio_plugin_device_t(name, args)); + }; + + option_parser_t parser; + parser.help(&suggest_help); + parser.option('h', "help", 0, [&](const char* s){help(0);}); + parser.option('d', 0, 0, [&](const char* s){debug = true;}); + parser.option('g', 0, 0, [&](const char* s){histogram = true;}); + parser.option('l', 0, 0, [&](const char* s){log = true;}); +#ifdef HAVE_BOOST_ASIO + parser.option('s', 0, 0, [&](const char* s){socket = true;}); +#endif + parser.option('p', 0, 1, [&](const char* s){nprocs = atoul_nonzero_safe(s);}); + parser.option('m', 0, 1, [&](const char* s){cfg.mem_layout = parse_mem_layout(s);}); + // I wanted to use --halted, but for some reason that doesn't work. 
+ parser.option('H', 0, 0, [&](const char* s){halted = true;}); + parser.option(0, "rbb-port", 1, [&](const char* s){use_rbb = true; rbb_port = atoul_safe(s);}); + parser.option(0, "pc", 1, [&](const char* s){cfg.start_pc = strtoull(s, 0, 0);}); + parser.option(0, "hartids", 1, [&](const char* s){ + cfg.hartids = parse_hartids(s); + cfg.explicit_hartids = true; + }); + parser.option(0, "ic", 1, [&](const char* s){ic.reset(new icache_sim_t(s));}); + parser.option(0, "dc", 1, [&](const char* s){dc.reset(new dcache_sim_t(s));}); + parser.option(0, "l2", 1, [&](const char* s){l2.reset(cache_sim_t::construct(s, "L2$"));}); + parser.option(0, "log-cache-miss", 0, [&](const char* s){log_cache = true;}); + parser.option(0, "isa", 1, [&](const char* s){cfg.isa = s;}); + parser.option(0, "priv", 1, [&](const char* s){cfg.priv = s;}); + parser.option(0, "varch", 1, [&](const char* s){cfg.varch = s;}); + parser.option(0, "device", 1, device_parser); + parser.option(0, "extension", 1, [&](const char* s){extensions.push_back(find_extension(s));}); + parser.option(0, "dump-dts", 0, [&](const char *s){dump_dts = true;}); + parser.option(0, "disable-dtb", 0, [&](const char *s){dtb_enabled = false;}); + parser.option(0, "dtb", 1, [&](const char *s){dtb_file = s;}); + parser.option(0, "kernel", 1, [&](const char* s){kernel = s;}); + parser.option(0, "initrd", 1, [&](const char* s){initrd = s;}); + parser.option(0, "bootargs", 1, [&](const char* s){cfg.bootargs = s;}); + parser.option(0, "real-time-clint", 0, [&](const char *s){cfg.real_time_clint = true;}); + parser.option(0, "extlib", 1, [&](const char *s){ + void *lib = dlopen(s, RTLD_NOW | RTLD_GLOBAL); + if (lib == NULL) { + fprintf(stderr, "Unable to load extlib '%s': %s\n", s, dlerror()); + exit(-1); + } + }); + parser.option(0, "dm-progsize", 1, + [&](const char* s){dm_config.progbufsize = atoul_safe(s);}); + parser.option(0, "dm-no-impebreak", 0, + [&](const char* s){dm_config.support_impebreak = false;}); + parser.option(0, "dm-sba", 1, + [&](const char* s){dm_config.max_sba_data_width = atoul_safe(s);}); + parser.option(0, "dm-auth", 0, + [&](const char* s){dm_config.require_authentication = true;}); + parser.option(0, "dmi-rti", 1, + [&](const char* s){dmi_rti = atoul_safe(s);}); + parser.option(0, "dm-abstract-rti", 1, + [&](const char* s){dm_config.abstract_rti = atoul_safe(s);}); + parser.option(0, "dm-no-hasel", 0, + [&](const char* s){dm_config.support_hasel = false;}); + parser.option(0, "dm-no-abstract-csr", 0, + [&](const char* s){dm_config.support_abstract_csr_access = false;}); + parser.option(0, "dm-no-halt-groups", 0, + [&](const char* s){dm_config.support_haltgroups = false;}); + parser.option(0, "log-commits", 0, + [&](const char* s){log_commits = true;}); + parser.option(0, "log", 1, + [&](const char* s){log_path = s;}); + FILE *cmd_file = NULL; + parser.option(0, "debug-cmd", 1, [&](const char* s){ + if ((cmd_file = fopen(s, "r"))==NULL) { + fprintf(stderr, "Unable to open command file '%s'\n", s); + exit(-1); + } + }); + parser.option(0, "blocksz", 1, [&](const char* s){ + blocksz = strtoull(s, 0, 0); + if (((blocksz & (blocksz - 1))) != 0) { + fprintf(stderr, "--blocksz should be power of 2\n"); + exit(-1); + } + }); + + auto argv1 = parser.parse(argv); + std::vector htif_args(argv1, (const char*const*)argv + argc); + + if (!*argv1) + help(); + + std::vector> mems = make_mems(cfg.mem_layout()); + + if (kernel && check_file_exists(kernel)) { + const char *isa = cfg.isa(); + kernel_size = get_file_size(kernel); + if (isa[2] == '6' 
&& isa[3] == '4') + kernel_offset = 0x200000; + else + kernel_offset = 0x400000; + for (auto& m : mems) { + if (kernel_size && (kernel_offset + kernel_size) < m.second->size()) { + read_file_bytes(kernel, 0, m.second, kernel_offset, kernel_size); + break; + } + } + } + + if (initrd && check_file_exists(initrd)) { + size_t initrd_size = get_file_size(initrd); + for (auto& m : mems) { + if (initrd_size && (initrd_size + 0x1000) < m.second->size()) { + reg_t initrd_end = m.first + m.second->size() - 0x1000; + reg_t initrd_start = initrd_end - initrd_size; + cfg.initrd_bounds = std::make_pair(initrd_start, initrd_end); + read_file_bytes(initrd, 0, m.second, initrd_start - m.first, initrd_size); + break; + } + } + } + +#ifdef HAVE_BOOST_ASIO + boost::asio::io_service *io_service_ptr = NULL; // needed for socket command interface option -s + boost::asio::ip::tcp::acceptor *acceptor_ptr = NULL; + if (socket) { // if command line option -s is set + try + { // create socket server + using boost::asio::ip::tcp; + io_service_ptr = new boost::asio::io_service; + acceptor_ptr = new tcp::acceptor(*io_service_ptr, tcp::endpoint(tcp::v4(), 0)); + // aceptor is created passing argument port=0, so O.S. will choose a free port + std::string name = boost::asio::ip::host_name(); + std::cout << "Listening for debug commands on " << name.substr(0,name.find('.')) + << " port " << acceptor_ptr->local_endpoint().port() << " ." << std::endl; + // at the end, add space and some other character for convenience of javascript .split(" ") + } + catch (std::exception& e) + { + std::cerr << e.what() << std::endl; + exit(-1); + } + } +#endif + + if (cfg.explicit_hartids) { + if (nprocs.overridden() && (nprocs() != cfg.nprocs())) { + std::cerr << "Number of specified hartids (" + << cfg.nprocs() + << ") doesn't match specified number of processors (" + << nprocs() << ").\n"; + exit(1); + } + } else { + // Set default set of hartids based on nprocs, but don't set the + // explicit_hartids flag (which means that downstream code can know that + // we've only set the number of harts, not explicitly chosen their IDs). 
+ std::vector default_hartids; + default_hartids.reserve(nprocs()); + for (size_t i = 0; i < nprocs(); ++i) { + default_hartids.push_back(i); + } + cfg.hartids = default_hartids; + } + + sim_t s(&cfg, halted, + mems, plugin_devices, htif_args, dm_config, log_path, dtb_enabled, dtb_file, +#ifdef HAVE_BOOST_ASIO + io_service_ptr, acceptor_ptr, +#endif + cmd_file); + std::unique_ptr remote_bitbang((remote_bitbang_t *) NULL); + std::unique_ptr jtag_dtm( + new jtag_dtm_t(&s.debug_module, dmi_rti)); + if (use_rbb) { + remote_bitbang.reset(new remote_bitbang_t(rbb_port, &(*jtag_dtm))); + s.set_remote_bitbang(&(*remote_bitbang)); + } + + if (dump_dts) { + printf("%s", s.get_dts()); + return 0; + } + + if (ic && l2) ic->set_miss_handler(&*l2); + if (dc && l2) dc->set_miss_handler(&*l2); + if (ic) ic->set_log(log_cache); + if (dc) dc->set_log(log_cache); + for (size_t i = 0; i < cfg.nprocs(); i++) + { + if (ic) s.get_core(i)->get_mmu()->register_memtracer(&*ic); + if (dc) s.get_core(i)->get_mmu()->register_memtracer(&*dc); + for (auto e : extensions) + s.get_core(i)->register_extension(e()); + s.get_core(i)->get_mmu()->set_cache_blocksz(blocksz); + } + + s.set_debug(debug); + s.configure_log(log, log_commits); + s.set_histogram(histogram); + + auto return_code = s.run(); + + for (auto& mem : mems) + delete mem.second; + + for (auto& plugin_device : plugin_devices) + delete plugin_device.second; + + return return_code; +} diff --git a/vendor/riscv-isa-sim/spike_main/spike_main.ac b/vendor/riscv-isa-sim/spike_main/spike_main.ac new file mode 100644 index 00000000..e69de29b diff --git a/vendor/riscv-isa-sim/spike_main/spike_main.mk.in b/vendor/riscv-isa-sim/spike_main/spike_main.mk.in new file mode 100644 index 00000000..35bef398 --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/spike_main.mk.in @@ -0,0 +1,16 @@ +spike_main_subproject_deps = \ + fdt \ + fesvr \ + softfloat \ + disasm \ + riscv \ + +spike_main_install_prog_srcs = \ + spike.cc \ + spike-log-parser.cc \ + xspike.cc \ + termios-xspike.cc \ + +spike_main_hdrs = \ + +spike_main_srcs = \ diff --git a/vendor/riscv-isa-sim/spike_main/termios-xspike.cc b/vendor/riscv-isa-sim/spike_main/termios-xspike.cc new file mode 100644 index 00000000..e533933b --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/termios-xspike.cc @@ -0,0 +1,29 @@ +// See LICENSE for license details. + +// termios-xspike sets up a canonical terminal and blocks forever. +// It allows us to send Ctrl-C etc. to the target machine. + +#include +#include +#include +#include +#include +#include + +int main() +{ + struct termios old_tios; + if (tcgetattr(0, &old_tios) < 0) + return -1; + + signal(SIGTERM, [](int) { }); + + struct termios new_tios = old_tios; + new_tios.c_lflag &= ~(ICANON | ECHO | ISIG); + if (tcsetattr(0, TCSANOW, &new_tios) < 0) + return -1; + + pause(); + + return tcsetattr(0, TCSANOW, &old_tios); +} diff --git a/vendor/riscv-isa-sim/spike_main/xspike.cc b/vendor/riscv-isa-sim/spike_main/xspike.cc new file mode 100644 index 00000000..f8c8ca7e --- /dev/null +++ b/vendor/riscv-isa-sim/spike_main/xspike.cc @@ -0,0 +1,102 @@ +// See LICENSE for license details. + +// xspike forks an xterm for spike's target machine console, +// preserving the current terminal for debugging. 
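+//
+// Typical (hypothetical) invocation: "xspike <spike options> <binary>".
+// The arguments are forwarded to a child spike process whose stdin/stdout
+// are redirected to the tty of the spawned xterm, while stderr (and hence
+// debug output) stays on the terminal xspike was started from.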
+ +#include +#include +#include +#include +#include +#include +#include +#include + +static pid_t fork_spike(int tty_fd, int argc, char** argv); +static pid_t fork_xterm(int* tty_fd); + +int main(int argc, char** argv) +{ + int tty_fd, wait_status, ret = -1; + pid_t xterm, spike; + + static bool signal_exit = false; + auto handle_signal = [](int) { signal_exit = true; }; + + if ((xterm = fork_xterm(&tty_fd)) < 0) + { + fprintf(stderr, "could not open xterm\n"); + goto out; + } + + signal(SIGINT, handle_signal); + + if ((spike = fork_spike(tty_fd, argc, argv)) < 0) + { + fprintf(stderr, "could not open spike\n"); + goto close_xterm; + } + + while ((ret = waitpid(spike, &wait_status, 0)) < 0) + if (signal_exit) + break; + + if (ret < 0) // signal_exit + kill(spike, SIGTERM); + else + ret = WIFEXITED(wait_status) ? WEXITSTATUS(wait_status) : -1; + +close_xterm: + kill(-xterm, SIGTERM); +out: + return ret; +} + +static pid_t fork_spike(int tty_fd, int argc, char** argv) +{ + pid_t pid = fork(); + if (pid < 0) + return -1; + + if (pid == 0) + { + if (dup2(tty_fd, STDIN_FILENO) < 0 || dup2(tty_fd, STDOUT_FILENO) < 0) + return -1; + execvp("spike", argv); + return -1; + } + + return pid; +} + +static pid_t fork_xterm(int* tty_fd) +{ + static const char cmd[] = "3>&1 xterm -title xspike -e sh -c 'tty 1>&3; termios-xspike'"; + + int fds[2]; + if (pipe(fds) < 0) + return -1; + + pid_t pid = fork(); + if (pid < 0) + return -1; + + if (pid == 0) + { + setpgid(0, 0); + if (dup2(fds[1], STDOUT_FILENO) < 0) + return -1; + execl("/bin/sh", "sh", "-c", cmd, NULL); + return -1; + } + + char tty[PATH_MAX]; + ssize_t ttylen = read(fds[0], tty, sizeof(tty)); + if (ttylen <= 1 || tty[ttylen-1] != '\n') + return -1; + tty[ttylen-1] = '\0'; + if ((*tty_fd = open(tty, O_RDWR)) < 0) + return -1; + + return pid; +} diff --git a/vendor/riscv-isa-sim/tests/ebreak.py b/vendor/riscv-isa-sim/tests/ebreak.py new file mode 100755 index 00000000..dd7e6587 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/ebreak.py @@ -0,0 +1,26 @@ +#!/usr/bin/python + +import os +import testlib +import unittest +import tempfile +import time + +class EbreakTest(unittest.TestCase): + def setUp(self): + self.binary = testlib.compile("ebreak.s") + + def test_noport(self): + """Make sure that we can run past ebreak when --gdb-port isn't used.""" + spike = testlib.Spike(self.binary, with_gdb=False, timeout=10) + result = spike.wait() + self.assertEqual(result, 0) + + def test_nogdb(self): + """Make sure that we can run past ebreak when gdb isn't attached.""" + spike = testlib.Spike(self.binary, timeout=10) + result = spike.wait() + self.assertEqual(result, 0) + +if __name__ == '__main__': + unittest.main() diff --git a/vendor/riscv-isa-sim/tests/ebreak.s b/vendor/riscv-isa-sim/tests/ebreak.s new file mode 100644 index 00000000..99f3e07c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/ebreak.s @@ -0,0 +1,5 @@ + .global main +main: + li a0, 0 + ebreak + ret diff --git a/vendor/riscv-isa-sim/tests/mseccfg/Makefile b/vendor/riscv-isa-sim/tests/mseccfg/Makefile new file mode 100644 index 00000000..2277410c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/Makefile @@ -0,0 +1,70 @@ +# Makefile for program model example + +XLEN ?= 32 +VLEN ?= 1024 +RISCV_TOOL ?= /home/saad/Downloads/lowrisc-toolchain-gcc-rv32imcb-20220524-1/bin/ +SPIKE_PATH ?= /home/saad/work/riscv-isa-sim/build +SAIL_EMULATOR_PATH = /home/scratch.soberl_maxwell/arch1/sail_2021/sail-riscv/c_emulator + +SSP_OPT ?= +PERF ?= 0 + +LIB_PATH = . 
+# ../ctests/nvrvv_lib.c +COMMON_FILES = \ + $(LIB_PATH)/crt.S \ + $(LIB_PATH)/syscalls.c + +TEST_PATH = ./gengen_src/outputs + +ALL_TEST ?= $(basename $(notdir $(wildcard $(TEST_PATH)/*.c))) +DEV_TEST = test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04 +OBJECTS ?= $(ALL_TEST) +# NEVER enable 'C' because pc + 4 is used in test code. +# -ffast-math -fno-common -fno-builtin-printf +CFLAGS = -march=rv$(XLEN)imafd -O2 -I . -I ./$(LIB_PATH) -I ../softfloat -I ../riscv \ + -fno-builtin-printf -fdata-sections -fno-section-anchors $(SSP_OPT) -DPRINTF_SUPPORTED=1 +LDFLAGS = -mcmodel=medany -static -nostdlib -nostartfiles -lm -lgcc \ + -T $(LIB_PATH)/mseccfg_test.ld -Wl,-M -Wl,-Map=link.log + +# must enable 'C', maybe used in pk +# 8M for TCM memories +# 16M for L2 memories +SIM_ISA = --isa=RV$(XLEN)IMAFDC + +default: + @echo "make gen, to generate all test cases with gengen" + @echo "make run, to run all test cases" + @echo "set OBJECTS variant to select specified test case" + +gen: + cd gengen_src; $(MAKE); $(MAKE) gen; + +$(OBJECTS): + @$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-gcc $(CFLAGS) $(TEST_PATH)/$@.c $(COMMON_FILES) $(LDFLAGS) -o a.out + @echo Running $(TEST_PATH)/$@.c - command - $(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-gcc $(CFLAGS) $(TEST_PATH)/$@.c $(COMMON_FILES) $(LDFLAGS) -o a.out + @$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-objdump -d a.out > a.ss + @$(RISCV_TOOL)/riscv$(XLEN)-unknown-elf-objdump --disassemble=target_foo a.out >> a.ss +ifeq ($(PERF), 0) + $(SPIKE_PATH)/spike $(SIM_ISA) -m0x100000:0x200000 a.out +# $(SAIL_EMULATOR_PATH)/riscv_sim_RV64 --enable-pmp -V a.out > tmp.log 2>&1; grep SUCCESS tmp.log +# @! grep FAILURE tmp.log +# $(RISCV_TOOL)/spike $(SIM_ISA) -l a.out > $@_pc.log 2>&1 +# sed -i '0,/ nop/d' $@_pc.log +# sed -i '/ nop/q' $@_pc.log +endif + +run: $(OBJECTS) + +clean: + rm *.s *.o *.i *.ss *.out *.log *.bin + +log: + $(SPIKE_PATH)/spike $(SIM_ISA) -m0x100000:0x200000 -l a.out > 1.log 2>&1 + $(SAIL_EMULATOR_PATH)/riscv_sim_RV64 --enable-pmp a.out > 2.log 2>&1 + +env: + echo $(ALL_TEST) + + +.PHONY: gen $(OBJECTS) clean diff --git a/vendor/riscv-isa-sim/tests/mseccfg/crt.S b/vendor/riscv-isa-sim/tests/mseccfg/crt.S new file mode 100644 index 00000000..bfbceae2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/crt.S @@ -0,0 +1,230 @@ +# See LICENSE for license details. 
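+
+# Bare-metal startup for these tests: _start clears the integer (and, when
+# __riscv_flen is defined, floating-point) register file, enables the FPU
+# via mstatus, installs trap_entry as the machine trap vector, sets up the
+# global pointer, thread pointer and a per-hart stack, and jumps to _init.
+# trap_entry saves all registers to a shadow area, calls
+# handle_trap(mcause, mepc, regs) and returns with mret.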
+ +#include "encoding.h" + +#if __riscv_xlen == 64 +# define LREG ld +# define SREG sd +# define REGBYTES 8 +#else +# define LREG lw +# define SREG sw +# define REGBYTES 4 +#endif + + .section ".text.init" + .globl _start +_start: + li x1, 0 + li x2, 0 + li x3, 0 + li x4, 0 + li x5, 0 + li x6, 0 + li x7, 0 + li x8, 0 + li x9, 0 + li x10,0 + li x11,0 + li x12,0 + li x13,0 + li x14,0 + li x15,0 + li x16,0 + li x17,0 + li x18,0 + li x19,0 + li x20,0 + li x21,0 + li x22,0 + li x23,0 + li x24,0 + li x25,0 + li x26,0 + li x27,0 + li x28,0 + li x29,0 + li x30,0 + li x31,0 + + # enable FPU and accelerator if present + li t0, MSTATUS_FS | MSTATUS_XS + csrs mstatus, t0 + + # make sure XLEN agrees with compilation choice + li t0, 1 + slli t0, t0, 31 +#if __riscv_xlen == 64 + bgez t0, 1f +#else + bltz t0, 1f +#endif +2: + li a0, 1 + sw a0, tohost, t0 + j 2b +1: + +#ifdef __riscv_flen + # initialize FPU if we have one + la t0, 1f + csrw mtvec, t0 + + fssr x0 + fmv.s.x f0, x0 + fmv.s.x f1, x0 + fmv.s.x f2, x0 + fmv.s.x f3, x0 + fmv.s.x f4, x0 + fmv.s.x f5, x0 + fmv.s.x f6, x0 + fmv.s.x f7, x0 + fmv.s.x f8, x0 + fmv.s.x f9, x0 + fmv.s.x f10,x0 + fmv.s.x f11,x0 + fmv.s.x f12,x0 + fmv.s.x f13,x0 + fmv.s.x f14,x0 + fmv.s.x f15,x0 + fmv.s.x f16,x0 + fmv.s.x f17,x0 + fmv.s.x f18,x0 + fmv.s.x f19,x0 + fmv.s.x f20,x0 + fmv.s.x f21,x0 + fmv.s.x f22,x0 + fmv.s.x f23,x0 + fmv.s.x f24,x0 + fmv.s.x f25,x0 + fmv.s.x f26,x0 + fmv.s.x f27,x0 + fmv.s.x f28,x0 + fmv.s.x f29,x0 + fmv.s.x f30,x0 + fmv.s.x f31,x0 +1: +#endif + + # initialize trap vector + la t0, trap_entry + csrw mtvec, t0 + + # initialize global pointer +.option push +.option norelax + la gp, __global_pointer$ +.option pop + + la tp, _end + 63 + and tp, tp, -64 + + # get core id + csrr a0, mhartid + # for now, assume only 1 core + li a1, 1 +1:bgeu a0, a1, 1b + + # give each core 128KB of stack + TLS +#define STKSHIFT 17 + add sp, a0, 1 + sll sp, sp, STKSHIFT + add sp, sp, tp + sll a2, a0, STKSHIFT + add tp, tp, a2 + + j _init + + .align 2 +trap_entry: + #addi sp, sp, -272 + # use shadow address + SREG sp, 31*REGBYTES(gp) + addi sp, gp, 512 + addi sp, gp, 512 + + SREG x1, 1*REGBYTES(sp) + SREG x2, 2*REGBYTES(sp) + SREG x3, 3*REGBYTES(sp) + SREG x4, 4*REGBYTES(sp) + SREG x5, 5*REGBYTES(sp) + SREG x6, 6*REGBYTES(sp) + SREG x7, 7*REGBYTES(sp) + SREG x8, 8*REGBYTES(sp) + SREG x9, 9*REGBYTES(sp) + SREG x10, 10*REGBYTES(sp) + SREG x11, 11*REGBYTES(sp) + SREG x12, 12*REGBYTES(sp) + SREG x13, 13*REGBYTES(sp) + SREG x14, 14*REGBYTES(sp) + SREG x15, 15*REGBYTES(sp) + SREG x16, 16*REGBYTES(sp) + SREG x17, 17*REGBYTES(sp) + SREG x18, 18*REGBYTES(sp) + SREG x19, 19*REGBYTES(sp) + SREG x20, 20*REGBYTES(sp) + SREG x21, 21*REGBYTES(sp) + SREG x22, 22*REGBYTES(sp) + SREG x23, 23*REGBYTES(sp) + SREG x24, 24*REGBYTES(sp) + SREG x25, 25*REGBYTES(sp) + SREG x26, 26*REGBYTES(sp) + SREG x27, 27*REGBYTES(sp) + SREG x28, 28*REGBYTES(sp) + SREG x29, 29*REGBYTES(sp) + SREG x30, 30*REGBYTES(sp) + SREG x31, 31*REGBYTES(sp) + + csrr a0, mcause + csrr a1, mepc + mv a2, sp + jal handle_trap + csrw mepc, a0 + + # Remain in M-mode after eret + #li t0, MSTATUS_MPP + #csrs mstatus, t0 + + LREG x1, 1*REGBYTES(sp) + LREG x2, 2*REGBYTES(sp) + LREG x3, 3*REGBYTES(sp) + LREG x4, 4*REGBYTES(sp) + LREG x5, 5*REGBYTES(sp) + LREG x6, 6*REGBYTES(sp) + LREG x7, 7*REGBYTES(sp) + LREG x8, 8*REGBYTES(sp) + LREG x9, 9*REGBYTES(sp) + LREG x10, 10*REGBYTES(sp) + LREG x11, 11*REGBYTES(sp) + LREG x12, 12*REGBYTES(sp) + LREG x13, 13*REGBYTES(sp) + LREG x14, 14*REGBYTES(sp) + LREG x15, 15*REGBYTES(sp) + LREG x16, 
16*REGBYTES(sp) + LREG x17, 17*REGBYTES(sp) + LREG x18, 18*REGBYTES(sp) + LREG x19, 19*REGBYTES(sp) + LREG x20, 20*REGBYTES(sp) + LREG x21, 21*REGBYTES(sp) + LREG x22, 22*REGBYTES(sp) + LREG x23, 23*REGBYTES(sp) + LREG x24, 24*REGBYTES(sp) + LREG x25, 25*REGBYTES(sp) + LREG x26, 26*REGBYTES(sp) + LREG x27, 27*REGBYTES(sp) + LREG x28, 28*REGBYTES(sp) + LREG x29, 29*REGBYTES(sp) + LREG x30, 30*REGBYTES(sp) + LREG x31, 31*REGBYTES(sp) + + #addi sp, sp, 272 + LREG sp, 31*REGBYTES(gp) + mret + +.section ".tohost","aw",@progbits +.align 6 +.globl tohost +tohost: .dword 0 +.align 6 +.globl fromhost +fromhost: .dword 0 diff --git a/vendor/riscv-isa-sim/tests/mseccfg/encoding.h b/vendor/riscv-isa-sim/tests/mseccfg/encoding.h new file mode 100644 index 00000000..e32f9580 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/encoding.h @@ -0,0 +1,1473 @@ +// See LICENSE for license details. + +#ifndef RISCV_CSR_ENCODING_H +#define RISCV_CSR_ENCODING_H + +#define MSTATUS_UIE 0x00000001 +#define MSTATUS_SIE 0x00000002 +#define MSTATUS_HIE 0x00000004 +#define MSTATUS_MIE 0x00000008 +#define MSTATUS_UPIE 0x00000010 +#define MSTATUS_SPIE 0x00000020 +#define MSTATUS_HPIE 0x00000040 +#define MSTATUS_MPIE 0x00000080 +#define MSTATUS_SPP 0x00000100 +#define MSTATUS_HPP 0x00000600 +#define MSTATUS_MPP 0x00001800 +#define MSTATUS_FS 0x00006000 +#define MSTATUS_XS 0x00018000 +#define MSTATUS_MPRV 0x00020000 +#define MSTATUS_SUM 0x00040000 +#define MSTATUS_MXR 0x00080000 +#define MSTATUS_TVM 0x00100000 +#define MSTATUS_TW 0x00200000 +#define MSTATUS_TSR 0x00400000 +#define MSTATUS_VS 0x01800000 +#define MSTATUS32_SD 0x80000000 +#define MSTATUS_UXL 0x0000000300000000 +#define MSTATUS_SXL 0x0000000C00000000 +#define MSTATUS64_SD 0x8000000000000000 + +#define SSTATUS_UIE 0x00000001 +#define SSTATUS_SIE 0x00000002 +#define SSTATUS_UPIE 0x00000010 +#define SSTATUS_SPIE 0x00000020 +#define SSTATUS_SPP 0x00000100 +#define SSTATUS_FS 0x00006000 +#define SSTATUS_XS 0x00018000 +#define SSTATUS_SUM 0x00040000 +#define SSTATUS_MXR 0x00080000 +#define SSTATUS_VS 0x01800000 +#define SSTATUS32_SD 0x80000000 +#define SSTATUS_UXL 0x0000000300000000 +#define SSTATUS64_SD 0x8000000000000000 + +#define DCSR_XDEBUGVER (3U<<30) +#define DCSR_NDRESET (1<<29) +#define DCSR_FULLRESET (1<<28) +#define DCSR_EBREAKM (1<<15) +#define DCSR_EBREAKH (1<<14) +#define DCSR_EBREAKS (1<<13) +#define DCSR_EBREAKU (1<<12) +#define DCSR_STOPCYCLE (1<<10) +#define DCSR_STOPTIME (1<<9) +#define DCSR_CAUSE (7<<6) +#define DCSR_DEBUGINT (1<<5) +#define DCSR_HALT (1<<3) +#define DCSR_STEP (1<<2) +#define DCSR_PRV (3<<0) + +#define DCSR_CAUSE_NONE 0 +#define DCSR_CAUSE_SWBP 1 +#define DCSR_CAUSE_HWBP 2 +#define DCSR_CAUSE_DEBUGINT 3 +#define DCSR_CAUSE_STEP 4 +#define DCSR_CAUSE_HALT 5 + +#define MCONTROL_TYPE(xlen) (0xfULL<<((xlen)-4)) +#define MCONTROL_DMODE(xlen) (1ULL<<((xlen)-5)) +#define MCONTROL_MASKMAX(xlen) (0x3fULL<<((xlen)-11)) + +#define MCONTROL_SELECT (1<<19) +#define MCONTROL_TIMING (1<<18) +#define MCONTROL_ACTION (0x3f<<12) +#define MCONTROL_CHAIN (1<<11) +#define MCONTROL_MATCH (0xf<<7) +#define MCONTROL_M (1<<6) +#define MCONTROL_H (1<<5) +#define MCONTROL_S (1<<4) +#define MCONTROL_U (1<<3) +#define MCONTROL_EXECUTE (1<<2) +#define MCONTROL_STORE (1<<1) +#define MCONTROL_LOAD (1<<0) + +#define MCONTROL_TYPE_NONE 0 +#define MCONTROL_TYPE_MATCH 2 + +#define MCONTROL_ACTION_DEBUG_EXCEPTION 0 +#define MCONTROL_ACTION_DEBUG_MODE 1 +#define MCONTROL_ACTION_TRACE_START 2 +#define MCONTROL_ACTION_TRACE_STOP 3 +#define MCONTROL_ACTION_TRACE_EMIT 
4 + +#define MCONTROL_MATCH_EQUAL 0 +#define MCONTROL_MATCH_NAPOT 1 +#define MCONTROL_MATCH_GE 2 +#define MCONTROL_MATCH_LT 3 +#define MCONTROL_MATCH_MASK_LOW 4 +#define MCONTROL_MATCH_MASK_HIGH 5 + +#define MIP_SSIP (1 << IRQ_S_SOFT) +#define MIP_HSIP (1 << IRQ_H_SOFT) +#define MIP_MSIP (1 << IRQ_M_SOFT) +#define MIP_STIP (1 << IRQ_S_TIMER) +#define MIP_HTIP (1 << IRQ_H_TIMER) +#define MIP_MTIP (1 << IRQ_M_TIMER) +#define MIP_SEIP (1 << IRQ_S_EXT) +#define MIP_HEIP (1 << IRQ_H_EXT) +#define MIP_MEIP (1 << IRQ_M_EXT) + +#define SIP_SSIP MIP_SSIP +#define SIP_STIP MIP_STIP + +#define PRV_U 0 +#define PRV_S 1 +#define PRV_H 2 +#define PRV_M 3 + +#define SATP32_MODE 0x80000000 +#define SATP32_ASID 0x7FC00000 +#define SATP32_PPN 0x003FFFFF +#define SATP64_MODE 0xF000000000000000 +#define SATP64_ASID 0x0FFFF00000000000 +#define SATP64_PPN 0x00000FFFFFFFFFFF + +#define SATP_MODE_OFF 0 +#define SATP_MODE_SV32 1 +#define SATP_MODE_SV39 8 +#define SATP_MODE_SV48 9 +#define SATP_MODE_SV57 10 +#define SATP_MODE_SV64 11 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define IRQ_S_SOFT 1 +#define IRQ_H_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_S_TIMER 5 +#define IRQ_H_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_S_EXT 9 +#define IRQ_H_EXT 10 +#define IRQ_M_EXT 11 +#define IRQ_COP 12 +#define IRQ_HOST 13 + +#define DEFAULT_RSTVEC 0x00001000 +#define CLINT_BASE 0x02000000 +#define CLINT_SIZE 0x000c0000 +#define EXT_IO_BASE 0x40000000 +#define DRAM_BASE 0x80000000 + +// page table entry (PTE) fields +#define PTE_V 0x001 // Valid +#define PTE_R 0x002 // Read +#define PTE_W 0x004 // Write +#define PTE_X 0x008 // Execute +#define PTE_U 0x010 // User +#define PTE_G 0x020 // Global +#define PTE_A 0x040 // Accessed +#define PTE_D 0x080 // Dirty +#define PTE_SOFT 0x300 // Reserved for Software + +#define PTE_PPN_SHIFT 10 + +#define PTE_TABLE(PTE) (((PTE) & (PTE_V | PTE_R | PTE_W | PTE_X)) == PTE_V) + +#ifdef __riscv + +#if __riscv_xlen == 64 +# define MSTATUS_SD MSTATUS64_SD +# define SSTATUS_SD SSTATUS64_SD +# define RISCV_PGLEVEL_BITS 9 +# define SATP_MODE SATP64_MODE +#else +# define MSTATUS_SD MSTATUS32_SD +# define SSTATUS_SD SSTATUS32_SD +# define RISCV_PGLEVEL_BITS 10 +# define SATP_MODE SATP32_MODE +#endif +#define RISCV_PGSHIFT 12 +#define RISCV_PGSIZE (1 << RISCV_PGSHIFT) + +#ifndef __ASSEMBLER__ + +#ifdef __GNUC__ + +#define read_csr(reg) ({ unsigned long __tmp; \ + asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \ + __tmp; }) + +#define write_csr(reg, val) ({ \ + asm volatile ("csrw " #reg ", %0" :: "rK"(val)); }) + +#define swap_csr(reg, val) ({ unsigned long __tmp; \ + asm volatile ("csrrw %0, " #reg ", %1" : "=r"(__tmp) : "rK"(val)); \ + __tmp; }) + +#define set_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrs %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define clear_csr(reg, bit) ({ unsigned long __tmp; \ + asm volatile ("csrrc %0, " #reg ", %1" : "=r"(__tmp) : "rK"(bit)); \ + __tmp; }) + +#define rdtime() read_csr(time) +#define rdcycle() read_csr(cycle) +#define rdinstret() read_csr(instret) + +#endif + +#endif + +#endif + +#endif +/* Automatically generated by parse-opcodes. 
*/ +#ifndef RISCV_ENCODING_H +#define RISCV_ENCODING_H +#define MATCH_BEQ 0x63 +#define MASK_BEQ 0x707f +#define MATCH_BNE 0x1063 +#define MASK_BNE 0x707f +#define MATCH_BLT 0x4063 +#define MASK_BLT 0x707f +#define MATCH_BGE 0x5063 +#define MASK_BGE 0x707f +#define MATCH_BLTU 0x6063 +#define MASK_BLTU 0x707f +#define MATCH_BGEU 0x7063 +#define MASK_BGEU 0x707f +#define MATCH_JALR 0x67 +#define MASK_JALR 0x707f +#define MATCH_JAL 0x6f +#define MASK_JAL 0x7f +#define MATCH_LUI 0x37 +#define MASK_LUI 0x7f +#define MATCH_AUIPC 0x17 +#define MASK_AUIPC 0x7f +#define MATCH_ADDI 0x13 +#define MASK_ADDI 0x707f +#define MATCH_SLLI 0x1013 +#define MASK_SLLI 0xfc00707f +#define MATCH_SLTI 0x2013 +#define MASK_SLTI 0x707f +#define MATCH_SLTIU 0x3013 +#define MASK_SLTIU 0x707f +#define MATCH_XORI 0x4013 +#define MASK_XORI 0x707f +#define MATCH_SRLI 0x5013 +#define MASK_SRLI 0xfc00707f +#define MATCH_SRAI 0x40005013 +#define MASK_SRAI 0xfc00707f +#define MATCH_ORI 0x6013 +#define MASK_ORI 0x707f +#define MATCH_ANDI 0x7013 +#define MASK_ANDI 0x707f +#define MATCH_ADD 0x33 +#define MASK_ADD 0xfe00707f +#define MATCH_SUB 0x40000033 +#define MASK_SUB 0xfe00707f +#define MATCH_SLL 0x1033 +#define MASK_SLL 0xfe00707f +#define MATCH_SLT 0x2033 +#define MASK_SLT 0xfe00707f +#define MATCH_SLTU 0x3033 +#define MASK_SLTU 0xfe00707f +#define MATCH_XOR 0x4033 +#define MASK_XOR 0xfe00707f +#define MATCH_SRL 0x5033 +#define MASK_SRL 0xfe00707f +#define MATCH_SRA 0x40005033 +#define MASK_SRA 0xfe00707f +#define MATCH_OR 0x6033 +#define MASK_OR 0xfe00707f +#define MATCH_AND 0x7033 +#define MASK_AND 0xfe00707f +#define MATCH_ADDIW 0x1b +#define MASK_ADDIW 0x707f +#define MATCH_SLLIW 0x101b +#define MASK_SLLIW 0xfe00707f +#define MATCH_SRLIW 0x501b +#define MASK_SRLIW 0xfe00707f +#define MATCH_SRAIW 0x4000501b +#define MASK_SRAIW 0xfe00707f +#define MATCH_ADDW 0x3b +#define MASK_ADDW 0xfe00707f +#define MATCH_SUBW 0x4000003b +#define MASK_SUBW 0xfe00707f +#define MATCH_SLLW 0x103b +#define MASK_SLLW 0xfe00707f +#define MATCH_SRLW 0x503b +#define MASK_SRLW 0xfe00707f +#define MATCH_SRAW 0x4000503b +#define MASK_SRAW 0xfe00707f +#define MATCH_LB 0x3 +#define MASK_LB 0x707f +#define MATCH_LH 0x1003 +#define MASK_LH 0x707f +#define MATCH_LW 0x2003 +#define MASK_LW 0x707f +#define MATCH_LD 0x3003 +#define MASK_LD 0x707f +#define MATCH_LBU 0x4003 +#define MASK_LBU 0x707f +#define MATCH_LHU 0x5003 +#define MASK_LHU 0x707f +#define MATCH_LWU 0x6003 +#define MASK_LWU 0x707f +#define MATCH_SB 0x23 +#define MASK_SB 0x707f +#define MATCH_SH 0x1023 +#define MASK_SH 0x707f +#define MATCH_SW 0x2023 +#define MASK_SW 0x707f +#define MATCH_SD 0x3023 +#define MASK_SD 0x707f +#define MATCH_FENCE 0xf +#define MASK_FENCE 0x707f +#define MATCH_FENCE_I 0x100f +#define MASK_FENCE_I 0x707f +#define MATCH_MUL 0x2000033 +#define MASK_MUL 0xfe00707f +#define MATCH_MULH 0x2001033 +#define MASK_MULH 0xfe00707f +#define MATCH_MULHSU 0x2002033 +#define MASK_MULHSU 0xfe00707f +#define MATCH_MULHU 0x2003033 +#define MASK_MULHU 0xfe00707f +#define MATCH_DIV 0x2004033 +#define MASK_DIV 0xfe00707f +#define MATCH_DIVU 0x2005033 +#define MASK_DIVU 0xfe00707f +#define MATCH_REM 0x2006033 +#define MASK_REM 0xfe00707f +#define MATCH_REMU 0x2007033 +#define MASK_REMU 0xfe00707f +#define MATCH_MULW 0x200003b +#define MASK_MULW 0xfe00707f +#define MATCH_DIVW 0x200403b +#define MASK_DIVW 0xfe00707f +#define MATCH_DIVUW 0x200503b +#define MASK_DIVUW 0xfe00707f +#define MATCH_REMW 0x200603b +#define MASK_REMW 0xfe00707f +#define MATCH_REMUW 0x200703b +#define MASK_REMUW 
0xfe00707f +#define MATCH_AMOADD_W 0x202f +#define MASK_AMOADD_W 0xf800707f +#define MATCH_AMOXOR_W 0x2000202f +#define MASK_AMOXOR_W 0xf800707f +#define MATCH_AMOOR_W 0x4000202f +#define MASK_AMOOR_W 0xf800707f +#define MATCH_AMOAND_W 0x6000202f +#define MASK_AMOAND_W 0xf800707f +#define MATCH_AMOMIN_W 0x8000202f +#define MASK_AMOMIN_W 0xf800707f +#define MATCH_AMOMAX_W 0xa000202f +#define MASK_AMOMAX_W 0xf800707f +#define MATCH_AMOMINU_W 0xc000202f +#define MASK_AMOMINU_W 0xf800707f +#define MATCH_AMOMAXU_W 0xe000202f +#define MASK_AMOMAXU_W 0xf800707f +#define MATCH_AMOSWAP_W 0x800202f +#define MASK_AMOSWAP_W 0xf800707f +#define MATCH_LR_W 0x1000202f +#define MASK_LR_W 0xf9f0707f +#define MATCH_SC_W 0x1800202f +#define MASK_SC_W 0xf800707f +#define MATCH_AMOADD_D 0x302f +#define MASK_AMOADD_D 0xf800707f +#define MATCH_AMOXOR_D 0x2000302f +#define MASK_AMOXOR_D 0xf800707f +#define MATCH_AMOOR_D 0x4000302f +#define MASK_AMOOR_D 0xf800707f +#define MATCH_AMOAND_D 0x6000302f +#define MASK_AMOAND_D 0xf800707f +#define MATCH_AMOMIN_D 0x8000302f +#define MASK_AMOMIN_D 0xf800707f +#define MATCH_AMOMAX_D 0xa000302f +#define MASK_AMOMAX_D 0xf800707f +#define MATCH_AMOMINU_D 0xc000302f +#define MASK_AMOMINU_D 0xf800707f +#define MATCH_AMOMAXU_D 0xe000302f +#define MASK_AMOMAXU_D 0xf800707f +#define MATCH_AMOSWAP_D 0x800302f +#define MASK_AMOSWAP_D 0xf800707f +#define MATCH_LR_D 0x1000302f +#define MASK_LR_D 0xf9f0707f +#define MATCH_SC_D 0x1800302f +#define MASK_SC_D 0xf800707f +#define MATCH_ECALL 0x73 +#define MASK_ECALL 0xffffffff +#define MATCH_EBREAK 0x100073 +#define MASK_EBREAK 0xffffffff +#define MATCH_URET 0x200073 +#define MASK_URET 0xffffffff +#define MATCH_SRET 0x10200073 +#define MASK_SRET 0xffffffff +#define MATCH_MRET 0x30200073 +#define MASK_MRET 0xffffffff +#define MATCH_DRET 0x7b200073 +#define MASK_DRET 0xffffffff +#define MATCH_SFENCE_VMA 0x12000073 +#define MASK_SFENCE_VMA 0xfe007fff +#define MATCH_WFI 0x10500073 +#define MASK_WFI 0xffffffff +#define MATCH_CSRRW 0x1073 +#define MASK_CSRRW 0x707f +#define MATCH_CSRRS 0x2073 +#define MASK_CSRRS 0x707f +#define MATCH_CSRRC 0x3073 +#define MASK_CSRRC 0x707f +#define MATCH_CSRRWI 0x5073 +#define MASK_CSRRWI 0x707f +#define MATCH_CSRRSI 0x6073 +#define MASK_CSRRSI 0x707f +#define MATCH_CSRRCI 0x7073 +#define MASK_CSRRCI 0x707f +#define MATCH_FADD_S 0x53 +#define MASK_FADD_S 0xfe00007f +#define MATCH_FSUB_S 0x8000053 +#define MASK_FSUB_S 0xfe00007f +#define MATCH_FMUL_S 0x10000053 +#define MASK_FMUL_S 0xfe00007f +#define MATCH_FDIV_S 0x18000053 +#define MASK_FDIV_S 0xfe00007f +#define MATCH_FSGNJ_S 0x20000053 +#define MASK_FSGNJ_S 0xfe00707f +#define MATCH_FSGNJN_S 0x20001053 +#define MASK_FSGNJN_S 0xfe00707f +#define MATCH_FSGNJX_S 0x20002053 +#define MASK_FSGNJX_S 0xfe00707f +#define MATCH_FMIN_S 0x28000053 +#define MASK_FMIN_S 0xfe00707f +#define MATCH_FMAX_S 0x28001053 +#define MASK_FMAX_S 0xfe00707f +#define MATCH_FSQRT_S 0x58000053 +#define MASK_FSQRT_S 0xfff0007f +#define MATCH_FADD_D 0x2000053 +#define MASK_FADD_D 0xfe00007f +#define MATCH_FSUB_D 0xa000053 +#define MASK_FSUB_D 0xfe00007f +#define MATCH_FMUL_D 0x12000053 +#define MASK_FMUL_D 0xfe00007f +#define MATCH_FDIV_D 0x1a000053 +#define MASK_FDIV_D 0xfe00007f +#define MATCH_FSGNJ_D 0x22000053 +#define MASK_FSGNJ_D 0xfe00707f +#define MATCH_FSGNJN_D 0x22001053 +#define MASK_FSGNJN_D 0xfe00707f +#define MATCH_FSGNJX_D 0x22002053 +#define MASK_FSGNJX_D 0xfe00707f +#define MATCH_FMIN_D 0x2a000053 +#define MASK_FMIN_D 0xfe00707f +#define MATCH_FMAX_D 0x2a001053 +#define 
MASK_FMAX_D 0xfe00707f +#define MATCH_FCVT_S_D 0x40100053 +#define MASK_FCVT_S_D 0xfff0007f +#define MATCH_FCVT_D_S 0x42000053 +#define MASK_FCVT_D_S 0xfff0007f +#define MATCH_FSQRT_D 0x5a000053 +#define MASK_FSQRT_D 0xfff0007f +#define MATCH_FADD_Q 0x6000053 +#define MASK_FADD_Q 0xfe00007f +#define MATCH_FSUB_Q 0xe000053 +#define MASK_FSUB_Q 0xfe00007f +#define MATCH_FMUL_Q 0x16000053 +#define MASK_FMUL_Q 0xfe00007f +#define MATCH_FDIV_Q 0x1e000053 +#define MASK_FDIV_Q 0xfe00007f +#define MATCH_FSGNJ_Q 0x26000053 +#define MASK_FSGNJ_Q 0xfe00707f +#define MATCH_FSGNJN_Q 0x26001053 +#define MASK_FSGNJN_Q 0xfe00707f +#define MATCH_FSGNJX_Q 0x26002053 +#define MASK_FSGNJX_Q 0xfe00707f +#define MATCH_FMIN_Q 0x2e000053 +#define MASK_FMIN_Q 0xfe00707f +#define MATCH_FMAX_Q 0x2e001053 +#define MASK_FMAX_Q 0xfe00707f +#define MATCH_FCVT_S_Q 0x40300053 +#define MASK_FCVT_S_Q 0xfff0007f +#define MATCH_FCVT_Q_S 0x46000053 +#define MASK_FCVT_Q_S 0xfff0007f +#define MATCH_FCVT_D_Q 0x42300053 +#define MASK_FCVT_D_Q 0xfff0007f +#define MATCH_FCVT_Q_D 0x46100053 +#define MASK_FCVT_Q_D 0xfff0007f +#define MATCH_FSQRT_Q 0x5e000053 +#define MASK_FSQRT_Q 0xfff0007f +#define MATCH_FLE_S 0xa0000053 +#define MASK_FLE_S 0xfe00707f +#define MATCH_FLT_S 0xa0001053 +#define MASK_FLT_S 0xfe00707f +#define MATCH_FEQ_S 0xa0002053 +#define MASK_FEQ_S 0xfe00707f +#define MATCH_FLE_D 0xa2000053 +#define MASK_FLE_D 0xfe00707f +#define MATCH_FLT_D 0xa2001053 +#define MASK_FLT_D 0xfe00707f +#define MATCH_FEQ_D 0xa2002053 +#define MASK_FEQ_D 0xfe00707f +#define MATCH_FLE_Q 0xa6000053 +#define MASK_FLE_Q 0xfe00707f +#define MATCH_FLT_Q 0xa6001053 +#define MASK_FLT_Q 0xfe00707f +#define MATCH_FEQ_Q 0xa6002053 +#define MASK_FEQ_Q 0xfe00707f +#define MATCH_FCVT_W_S 0xc0000053 +#define MASK_FCVT_W_S 0xfff0007f +#define MATCH_FCVT_WU_S 0xc0100053 +#define MASK_FCVT_WU_S 0xfff0007f +#define MATCH_FCVT_L_S 0xc0200053 +#define MASK_FCVT_L_S 0xfff0007f +#define MATCH_FCVT_LU_S 0xc0300053 +#define MASK_FCVT_LU_S 0xfff0007f +#define MATCH_FMV_X_W 0xe0000053 +#define MASK_FMV_X_W 0xfff0707f +#define MATCH_FCLASS_S 0xe0001053 +#define MASK_FCLASS_S 0xfff0707f +#define MATCH_FCVT_W_D 0xc2000053 +#define MASK_FCVT_W_D 0xfff0007f +#define MATCH_FCVT_WU_D 0xc2100053 +#define MASK_FCVT_WU_D 0xfff0007f +#define MATCH_FCVT_L_D 0xc2200053 +#define MASK_FCVT_L_D 0xfff0007f +#define MATCH_FCVT_LU_D 0xc2300053 +#define MASK_FCVT_LU_D 0xfff0007f +#define MATCH_FMV_X_D 0xe2000053 +#define MASK_FMV_X_D 0xfff0707f +#define MATCH_FCLASS_D 0xe2001053 +#define MASK_FCLASS_D 0xfff0707f +#define MATCH_FCVT_W_Q 0xc6000053 +#define MASK_FCVT_W_Q 0xfff0007f +#define MATCH_FCVT_WU_Q 0xc6100053 +#define MASK_FCVT_WU_Q 0xfff0007f +#define MATCH_FCVT_L_Q 0xc6200053 +#define MASK_FCVT_L_Q 0xfff0007f +#define MATCH_FCVT_LU_Q 0xc6300053 +#define MASK_FCVT_LU_Q 0xfff0007f +#define MATCH_FMV_X_Q 0xe6000053 +#define MASK_FMV_X_Q 0xfff0707f +#define MATCH_FCLASS_Q 0xe6001053 +#define MASK_FCLASS_Q 0xfff0707f +#define MATCH_FCVT_S_W 0xd0000053 +#define MASK_FCVT_S_W 0xfff0007f +#define MATCH_FCVT_S_WU 0xd0100053 +#define MASK_FCVT_S_WU 0xfff0007f +#define MATCH_FCVT_S_L 0xd0200053 +#define MASK_FCVT_S_L 0xfff0007f +#define MATCH_FCVT_S_LU 0xd0300053 +#define MASK_FCVT_S_LU 0xfff0007f +#define MATCH_FMV_W_X 0xf0000053 +#define MASK_FMV_W_X 0xfff0707f +#define MATCH_FCVT_D_W 0xd2000053 +#define MASK_FCVT_D_W 0xfff0007f +#define MATCH_FCVT_D_WU 0xd2100053 +#define MASK_FCVT_D_WU 0xfff0007f +#define MATCH_FCVT_D_L 0xd2200053 +#define MASK_FCVT_D_L 0xfff0007f +#define 
MATCH_FCVT_D_LU 0xd2300053 +#define MASK_FCVT_D_LU 0xfff0007f +#define MATCH_FMV_D_X 0xf2000053 +#define MASK_FMV_D_X 0xfff0707f +#define MATCH_FCVT_Q_W 0xd6000053 +#define MASK_FCVT_Q_W 0xfff0007f +#define MATCH_FCVT_Q_WU 0xd6100053 +#define MASK_FCVT_Q_WU 0xfff0007f +#define MATCH_FCVT_Q_L 0xd6200053 +#define MASK_FCVT_Q_L 0xfff0007f +#define MATCH_FCVT_Q_LU 0xd6300053 +#define MASK_FCVT_Q_LU 0xfff0007f +#define MATCH_FMV_Q_X 0xf6000053 +#define MASK_FMV_Q_X 0xfff0707f +#define MATCH_FLW 0x2007 +#define MASK_FLW 0x707f +#define MATCH_FLD 0x3007 +#define MASK_FLD 0x707f +#define MATCH_FLQ 0x4007 +#define MASK_FLQ 0x707f +#define MATCH_FSW 0x2027 +#define MASK_FSW 0x707f +#define MATCH_FSD 0x3027 +#define MASK_FSD 0x707f +#define MATCH_FSQ 0x4027 +#define MASK_FSQ 0x707f +#define MATCH_FMADD_S 0x43 +#define MASK_FMADD_S 0x600007f +#define MATCH_FMSUB_S 0x47 +#define MASK_FMSUB_S 0x600007f +#define MATCH_FNMSUB_S 0x4b +#define MASK_FNMSUB_S 0x600007f +#define MATCH_FNMADD_S 0x4f +#define MASK_FNMADD_S 0x600007f +#define MATCH_FMADD_D 0x2000043 +#define MASK_FMADD_D 0x600007f +#define MATCH_FMSUB_D 0x2000047 +#define MASK_FMSUB_D 0x600007f +#define MATCH_FNMSUB_D 0x200004b +#define MASK_FNMSUB_D 0x600007f +#define MATCH_FNMADD_D 0x200004f +#define MASK_FNMADD_D 0x600007f +#define MATCH_FMADD_Q 0x6000043 +#define MASK_FMADD_Q 0x600007f +#define MATCH_FMSUB_Q 0x6000047 +#define MASK_FMSUB_Q 0x600007f +#define MATCH_FNMSUB_Q 0x600004b +#define MASK_FNMSUB_Q 0x600007f +#define MATCH_FNMADD_Q 0x600004f +#define MASK_FNMADD_Q 0x600007f +#define MATCH_C_NOP 0x1 +#define MASK_C_NOP 0xffff +#define MATCH_C_ADDI16SP 0x6101 +#define MASK_C_ADDI16SP 0xef83 +#define MATCH_C_JR 0x8002 +#define MASK_C_JR 0xf07f +#define MATCH_C_JALR 0x9002 +#define MASK_C_JALR 0xf07f +#define MATCH_C_EBREAK 0x9002 +#define MASK_C_EBREAK 0xffff +#define MATCH_C_LD 0x6000 +#define MASK_C_LD 0xe003 +#define MATCH_C_SD 0xe000 +#define MASK_C_SD 0xe003 +#define MATCH_C_ADDIW 0x2001 +#define MASK_C_ADDIW 0xe003 +#define MATCH_C_LDSP 0x6002 +#define MASK_C_LDSP 0xe003 +#define MATCH_C_SDSP 0xe002 +#define MASK_C_SDSP 0xe003 +#define MATCH_C_ADDI4SPN 0x0 +#define MASK_C_ADDI4SPN 0xe003 +#define MATCH_C_FLD 0x2000 +#define MASK_C_FLD 0xe003 +#define MATCH_C_LW 0x4000 +#define MASK_C_LW 0xe003 +#define MATCH_C_FLW 0x6000 +#define MASK_C_FLW 0xe003 +#define MATCH_C_FSD 0xa000 +#define MASK_C_FSD 0xe003 +#define MATCH_C_SW 0xc000 +#define MASK_C_SW 0xe003 +#define MATCH_C_FSW 0xe000 +#define MASK_C_FSW 0xe003 +#define MATCH_C_ADDI 0x1 +#define MASK_C_ADDI 0xe003 +#define MATCH_C_JAL 0x2001 +#define MASK_C_JAL 0xe003 +#define MATCH_C_LI 0x4001 +#define MASK_C_LI 0xe003 +#define MATCH_C_LUI 0x6001 +#define MASK_C_LUI 0xe003 +#define MATCH_C_SRLI 0x8001 +#define MASK_C_SRLI 0xec03 +#define MATCH_C_SRAI 0x8401 +#define MASK_C_SRAI 0xec03 +#define MATCH_C_ANDI 0x8801 +#define MASK_C_ANDI 0xec03 +#define MATCH_C_SUB 0x8c01 +#define MASK_C_SUB 0xfc63 +#define MATCH_C_XOR 0x8c21 +#define MASK_C_XOR 0xfc63 +#define MATCH_C_OR 0x8c41 +#define MASK_C_OR 0xfc63 +#define MATCH_C_AND 0x8c61 +#define MASK_C_AND 0xfc63 +#define MATCH_C_SUBW 0x9c01 +#define MASK_C_SUBW 0xfc63 +#define MATCH_C_ADDW 0x9c21 +#define MASK_C_ADDW 0xfc63 +#define MATCH_C_J 0xa001 +#define MASK_C_J 0xe003 +#define MATCH_C_BEQZ 0xc001 +#define MASK_C_BEQZ 0xe003 +#define MATCH_C_BNEZ 0xe001 +#define MASK_C_BNEZ 0xe003 +#define MATCH_C_SLLI 0x2 +#define MASK_C_SLLI 0xe003 +#define MATCH_C_FLDSP 0x2002 +#define MASK_C_FLDSP 0xe003 +#define MATCH_C_LWSP 0x4002 +#define 
MASK_C_LWSP 0xe003 +#define MATCH_C_FLWSP 0x6002 +#define MASK_C_FLWSP 0xe003 +#define MATCH_C_MV 0x8002 +#define MASK_C_MV 0xf003 +#define MATCH_C_ADD 0x9002 +#define MASK_C_ADD 0xf003 +#define MATCH_C_FSDSP 0xa002 +#define MASK_C_FSDSP 0xe003 +#define MATCH_C_SWSP 0xc002 +#define MASK_C_SWSP 0xe003 +#define MATCH_C_FSWSP 0xe002 +#define MASK_C_FSWSP 0xe003 +#define MATCH_CUSTOM0 0xb +#define MASK_CUSTOM0 0x707f +#define MATCH_CUSTOM0_RS1 0x200b +#define MASK_CUSTOM0_RS1 0x707f +#define MATCH_CUSTOM0_RS1_RS2 0x300b +#define MASK_CUSTOM0_RS1_RS2 0x707f +#define MATCH_CUSTOM0_RD 0x400b +#define MASK_CUSTOM0_RD 0x707f +#define MATCH_CUSTOM0_RD_RS1 0x600b +#define MASK_CUSTOM0_RD_RS1 0x707f +#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b +#define MASK_CUSTOM0_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM1 0x2b +#define MASK_CUSTOM1 0x707f +#define MATCH_CUSTOM1_RS1 0x202b +#define MASK_CUSTOM1_RS1 0x707f +#define MATCH_CUSTOM1_RS1_RS2 0x302b +#define MASK_CUSTOM1_RS1_RS2 0x707f +#define MATCH_CUSTOM1_RD 0x402b +#define MASK_CUSTOM1_RD 0x707f +#define MATCH_CUSTOM1_RD_RS1 0x602b +#define MASK_CUSTOM1_RD_RS1 0x707f +#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b +#define MASK_CUSTOM1_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM2 0x5b +#define MASK_CUSTOM2 0x707f +#define MATCH_CUSTOM2_RS1 0x205b +#define MASK_CUSTOM2_RS1 0x707f +#define MATCH_CUSTOM2_RS1_RS2 0x305b +#define MASK_CUSTOM2_RS1_RS2 0x707f +#define MATCH_CUSTOM2_RD 0x405b +#define MASK_CUSTOM2_RD 0x707f +#define MATCH_CUSTOM2_RD_RS1 0x605b +#define MASK_CUSTOM2_RD_RS1 0x707f +#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b +#define MASK_CUSTOM2_RD_RS1_RS2 0x707f +#define MATCH_CUSTOM3 0x7b +#define MASK_CUSTOM3 0x707f +#define MATCH_CUSTOM3_RS1 0x207b +#define MASK_CUSTOM3_RS1 0x707f +#define MATCH_CUSTOM3_RS1_RS2 0x307b +#define MASK_CUSTOM3_RS1_RS2 0x707f +#define MATCH_CUSTOM3_RD 0x407b +#define MASK_CUSTOM3_RD 0x707f +#define MATCH_CUSTOM3_RD_RS1 0x607b +#define MASK_CUSTOM3_RD_RS1 0x707f +#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b +#define MASK_CUSTOM3_RD_RS1_RS2 0x707f +#define CSR_FFLAGS 0x1 +#define CSR_FRM 0x2 +#define CSR_FCSR 0x3 +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_SSTATUS 0x100 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_SATP 0x180 +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MEDELEG 0x302 +#define CSR_MIDELEG 0x303 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MCOUNTEREN 0x306 +#define 
CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPCFG1 0x3a1 +#define CSR_PMPCFG2 0x3a2 +#define CSR_PMPCFG3 0x3a3 +#define CSR_PMPADDR0 0x3b0 +#define CSR_PMPADDR1 0x3b1 +#define CSR_PMPADDR2 0x3b2 +#define CSR_PMPADDR3 0x3b3 +#define CSR_PMPADDR4 0x3b4 +#define CSR_PMPADDR5 0x3b5 +#define CSR_PMPADDR6 0x3b6 +#define CSR_PMPADDR7 0x3b7 +#define CSR_PMPADDR8 0x3b8 +#define CSR_PMPADDR9 0x3b9 +#define CSR_PMPADDR10 0x3ba +#define CSR_PMPADDR11 0x3bb +#define CSR_PMPADDR12 0x3bc +#define CSR_PMPADDR13 0x3bd +#define CSR_PMPADDR14 0x3be +#define CSR_PMPADDR15 0x3bf +#define CSR_TSELECT 0x7a0 +#define CSR_TDATA1 0x7a1 +#define CSR_TDATA2 0x7a2 +#define CSR_TDATA3 0x7a3 +#define CSR_DCSR 0x7b0 +#define CSR_DPC 0x7b1 +#define CSR_DSCRATCH 0x7b2 +#define CSR_MCYCLE 0xb00 +#define CSR_MINSTRET 0xb02 +#define CSR_MHPMCOUNTER3 0xb03 +#define CSR_MHPMCOUNTER4 0xb04 +#define CSR_MHPMCOUNTER5 0xb05 +#define CSR_MHPMCOUNTER6 0xb06 +#define CSR_MHPMCOUNTER7 0xb07 +#define CSR_MHPMCOUNTER8 0xb08 +#define CSR_MHPMCOUNTER9 0xb09 +#define CSR_MHPMCOUNTER10 0xb0a +#define CSR_MHPMCOUNTER11 0xb0b +#define CSR_MHPMCOUNTER12 0xb0c +#define CSR_MHPMCOUNTER13 0xb0d +#define CSR_MHPMCOUNTER14 0xb0e +#define CSR_MHPMCOUNTER15 0xb0f +#define CSR_MHPMCOUNTER16 0xb10 +#define CSR_MHPMCOUNTER17 0xb11 +#define CSR_MHPMCOUNTER18 0xb12 +#define CSR_MHPMCOUNTER19 0xb13 +#define CSR_MHPMCOUNTER20 0xb14 +#define CSR_MHPMCOUNTER21 0xb15 +#define CSR_MHPMCOUNTER22 0xb16 +#define CSR_MHPMCOUNTER23 0xb17 +#define CSR_MHPMCOUNTER24 0xb18 +#define CSR_MHPMCOUNTER25 0xb19 +#define CSR_MHPMCOUNTER26 0xb1a +#define CSR_MHPMCOUNTER27 0xb1b +#define CSR_MHPMCOUNTER28 0xb1c +#define CSR_MHPMCOUNTER29 0xb1d +#define CSR_MHPMCOUNTER30 0xb1e +#define CSR_MHPMCOUNTER31 0xb1f +#define CSR_MHPMEVENT3 0x323 +#define CSR_MHPMEVENT4 0x324 +#define CSR_MHPMEVENT5 0x325 +#define CSR_MHPMEVENT6 0x326 +#define CSR_MHPMEVENT7 0x327 +#define CSR_MHPMEVENT8 0x328 +#define CSR_MHPMEVENT9 0x329 +#define CSR_MHPMEVENT10 0x32a +#define CSR_MHPMEVENT11 0x32b +#define CSR_MHPMEVENT12 0x32c +#define CSR_MHPMEVENT13 0x32d +#define CSR_MHPMEVENT14 0x32e +#define CSR_MHPMEVENT15 0x32f +#define CSR_MHPMEVENT16 0x330 +#define CSR_MHPMEVENT17 0x331 +#define CSR_MHPMEVENT18 0x332 +#define CSR_MHPMEVENT19 0x333 +#define CSR_MHPMEVENT20 0x334 +#define CSR_MHPMEVENT21 0x335 +#define CSR_MHPMEVENT22 0x336 +#define CSR_MHPMEVENT23 0x337 +#define CSR_MHPMEVENT24 0x338 +#define CSR_MHPMEVENT25 0x339 +#define CSR_MHPMEVENT26 0x33a +#define CSR_MHPMEVENT27 0x33b +#define CSR_MHPMEVENT28 0x33c +#define CSR_MHPMEVENT29 0x33d +#define CSR_MHPMEVENT30 0x33e +#define CSR_MHPMEVENT31 0x33f +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 +#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define 
CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f +#define CSR_MCYCLEH 0xb80 +#define CSR_MINSTRETH 0xb82 +#define CSR_MHPMCOUNTER3H 0xb83 +#define CSR_MHPMCOUNTER4H 0xb84 +#define CSR_MHPMCOUNTER5H 0xb85 +#define CSR_MHPMCOUNTER6H 0xb86 +#define CSR_MHPMCOUNTER7H 0xb87 +#define CSR_MHPMCOUNTER8H 0xb88 +#define CSR_MHPMCOUNTER9H 0xb89 +#define CSR_MHPMCOUNTER10H 0xb8a +#define CSR_MHPMCOUNTER11H 0xb8b +#define CSR_MHPMCOUNTER12H 0xb8c +#define CSR_MHPMCOUNTER13H 0xb8d +#define CSR_MHPMCOUNTER14H 0xb8e +#define CSR_MHPMCOUNTER15H 0xb8f +#define CSR_MHPMCOUNTER16H 0xb90 +#define CSR_MHPMCOUNTER17H 0xb91 +#define CSR_MHPMCOUNTER18H 0xb92 +#define CSR_MHPMCOUNTER19H 0xb93 +#define CSR_MHPMCOUNTER20H 0xb94 +#define CSR_MHPMCOUNTER21H 0xb95 +#define CSR_MHPMCOUNTER22H 0xb96 +#define CSR_MHPMCOUNTER23H 0xb97 +#define CSR_MHPMCOUNTER24H 0xb98 +#define CSR_MHPMCOUNTER25H 0xb99 +#define CSR_MHPMCOUNTER26H 0xb9a +#define CSR_MHPMCOUNTER27H 0xb9b +#define CSR_MHPMCOUNTER28H 0xb9c +#define CSR_MHPMCOUNTER29H 0xb9d +#define CSR_MHPMCOUNTER30H 0xb9e +#define CSR_MHPMCOUNTER31H 0xb9f +#define CAUSE_MISALIGNED_FETCH 0x0 +#define CAUSE_FETCH_ACCESS 0x1 +#define CAUSE_ILLEGAL_INSTRUCTION 0x2 +#define CAUSE_BREAKPOINT 0x3 +#define CAUSE_MISALIGNED_LOAD 0x4 +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_MISALIGNED_STORE 0x6 +#define CAUSE_STORE_ACCESS 0x7 +#define CAUSE_USER_ECALL 0x8 +#define CAUSE_SUPERVISOR_ECALL 0x9 +#define CAUSE_HYPERVISOR_ECALL 0xa +#define CAUSE_MACHINE_ECALL 0xb +#define CAUSE_FETCH_PAGE_FAULT 0xc +#define CAUSE_LOAD_PAGE_FAULT 0xd +#define CAUSE_STORE_PAGE_FAULT 0xf +#endif +#ifdef DECLARE_INSN +DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ) +DECLARE_INSN(bne, MATCH_BNE, MASK_BNE) +DECLARE_INSN(blt, MATCH_BLT, MASK_BLT) +DECLARE_INSN(bge, MATCH_BGE, MASK_BGE) +DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU) +DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU) +DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR) +DECLARE_INSN(jal, MATCH_JAL, MASK_JAL) +DECLARE_INSN(lui, MATCH_LUI, MASK_LUI) +DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC) +DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI) +DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI) +DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI) +DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU) +DECLARE_INSN(xori, MATCH_XORI, MASK_XORI) +DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI) +DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI) +DECLARE_INSN(ori, MATCH_ORI, MASK_ORI) +DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI) +DECLARE_INSN(add, MATCH_ADD, MASK_ADD) +DECLARE_INSN(sub, MATCH_SUB, MASK_SUB) +DECLARE_INSN(sll, MATCH_SLL, MASK_SLL) +DECLARE_INSN(slt, MATCH_SLT, MASK_SLT) +DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU) +DECLARE_INSN(xor, MATCH_XOR, MASK_XOR) +DECLARE_INSN(srl, MATCH_SRL, MASK_SRL) +DECLARE_INSN(sra, MATCH_SRA, MASK_SRA) +DECLARE_INSN(or, MATCH_OR, MASK_OR) +DECLARE_INSN(and, MATCH_AND, MASK_AND) +DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW) +DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW) +DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW) +DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW) +DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW) +DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW) +DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW) +DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW) +DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW) +DECLARE_INSN(lb, 
MATCH_LB, MASK_LB) +DECLARE_INSN(lh, MATCH_LH, MASK_LH) +DECLARE_INSN(lw, MATCH_LW, MASK_LW) +DECLARE_INSN(ld, MATCH_LD, MASK_LD) +DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU) +DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU) +DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU) +DECLARE_INSN(sb, MATCH_SB, MASK_SB) +DECLARE_INSN(sh, MATCH_SH, MASK_SH) +DECLARE_INSN(sw, MATCH_SW, MASK_SW) +DECLARE_INSN(sd, MATCH_SD, MASK_SD) +DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE) +DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I) +DECLARE_INSN(mul, MATCH_MUL, MASK_MUL) +DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH) +DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU) +DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU) +DECLARE_INSN(div, MATCH_DIV, MASK_DIV) +DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU) +DECLARE_INSN(rem, MATCH_REM, MASK_REM) +DECLARE_INSN(remu, MATCH_REMU, MASK_REMU) +DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW) +DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW) +DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW) +DECLARE_INSN(remw, MATCH_REMW, MASK_REMW) +DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW) +DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W) +DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W) +DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W) +DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W) +DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W) +DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W) +DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W) +DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W) +DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W) +DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W) +DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W) +DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D) +DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D) +DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D) +DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D) +DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D) +DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D) +DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D) +DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D) +DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D) +DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D) +DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D) +DECLARE_INSN(ecall, MATCH_ECALL, MASK_ECALL) +DECLARE_INSN(ebreak, MATCH_EBREAK, MASK_EBREAK) +DECLARE_INSN(uret, MATCH_URET, MASK_URET) +DECLARE_INSN(sret, MATCH_SRET, MASK_SRET) +DECLARE_INSN(mret, MATCH_MRET, MASK_MRET) +DECLARE_INSN(dret, MATCH_DRET, MASK_DRET) +DECLARE_INSN(sfence_vma, MATCH_SFENCE_VMA, MASK_SFENCE_VMA) +DECLARE_INSN(wfi, MATCH_WFI, MASK_WFI) +DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW) +DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS) +DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC) +DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI) +DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI) +DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI) +DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S) +DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S) +DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S) +DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S) +DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S) +DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S) +DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S) +DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S) +DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S) +DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S) +DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D) +DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D) +DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D) 
+DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D) +DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D) +DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D) +DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D) +DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D) +DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D) +DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D) +DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S) +DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D) +DECLARE_INSN(fadd_q, MATCH_FADD_Q, MASK_FADD_Q) +DECLARE_INSN(fsub_q, MATCH_FSUB_Q, MASK_FSUB_Q) +DECLARE_INSN(fmul_q, MATCH_FMUL_Q, MASK_FMUL_Q) +DECLARE_INSN(fdiv_q, MATCH_FDIV_Q, MASK_FDIV_Q) +DECLARE_INSN(fsgnj_q, MATCH_FSGNJ_Q, MASK_FSGNJ_Q) +DECLARE_INSN(fsgnjn_q, MATCH_FSGNJN_Q, MASK_FSGNJN_Q) +DECLARE_INSN(fsgnjx_q, MATCH_FSGNJX_Q, MASK_FSGNJX_Q) +DECLARE_INSN(fmin_q, MATCH_FMIN_Q, MASK_FMIN_Q) +DECLARE_INSN(fmax_q, MATCH_FMAX_Q, MASK_FMAX_Q) +DECLARE_INSN(fcvt_s_q, MATCH_FCVT_S_Q, MASK_FCVT_S_Q) +DECLARE_INSN(fcvt_q_s, MATCH_FCVT_Q_S, MASK_FCVT_Q_S) +DECLARE_INSN(fcvt_d_q, MATCH_FCVT_D_Q, MASK_FCVT_D_Q) +DECLARE_INSN(fcvt_q_d, MATCH_FCVT_Q_D, MASK_FCVT_Q_D) +DECLARE_INSN(fsqrt_q, MATCH_FSQRT_Q, MASK_FSQRT_Q) +DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S) +DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S) +DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S) +DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D) +DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D) +DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D) +DECLARE_INSN(fle_q, MATCH_FLE_Q, MASK_FLE_Q) +DECLARE_INSN(flt_q, MATCH_FLT_Q, MASK_FLT_Q) +DECLARE_INSN(feq_q, MATCH_FEQ_Q, MASK_FEQ_Q) +DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S) +DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S) +DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S) +DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S) +DECLARE_INSN(fmv_x_w, MATCH_FMV_X_W, MASK_FMV_X_W) +DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S) +DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D) +DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D) +DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D) +DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D) +DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D) +DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D) +DECLARE_INSN(fcvt_w_q, MATCH_FCVT_W_Q, MASK_FCVT_W_Q) +DECLARE_INSN(fcvt_wu_q, MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q) +DECLARE_INSN(fcvt_l_q, MATCH_FCVT_L_Q, MASK_FCVT_L_Q) +DECLARE_INSN(fcvt_lu_q, MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q) +DECLARE_INSN(fmv_x_q, MATCH_FMV_X_Q, MASK_FMV_X_Q) +DECLARE_INSN(fclass_q, MATCH_FCLASS_Q, MASK_FCLASS_Q) +DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W) +DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU) +DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L) +DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU) +DECLARE_INSN(fmv_w_x, MATCH_FMV_W_X, MASK_FMV_W_X) +DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W) +DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU) +DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L) +DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU) +DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X) +DECLARE_INSN(fcvt_q_w, MATCH_FCVT_Q_W, MASK_FCVT_Q_W) +DECLARE_INSN(fcvt_q_wu, MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU) +DECLARE_INSN(fcvt_q_l, MATCH_FCVT_Q_L, MASK_FCVT_Q_L) +DECLARE_INSN(fcvt_q_lu, MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU) +DECLARE_INSN(fmv_q_x, MATCH_FMV_Q_X, MASK_FMV_Q_X) +DECLARE_INSN(flw, MATCH_FLW, MASK_FLW) +DECLARE_INSN(fld, MATCH_FLD, MASK_FLD) +DECLARE_INSN(flq, 
MATCH_FLQ, MASK_FLQ) +DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW) +DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD) +DECLARE_INSN(fsq, MATCH_FSQ, MASK_FSQ) +DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S) +DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S) +DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S) +DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S) +DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D) +DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D) +DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D) +DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D) +DECLARE_INSN(fmadd_q, MATCH_FMADD_Q, MASK_FMADD_Q) +DECLARE_INSN(fmsub_q, MATCH_FMSUB_Q, MASK_FMSUB_Q) +DECLARE_INSN(fnmsub_q, MATCH_FNMSUB_Q, MASK_FNMSUB_Q) +DECLARE_INSN(fnmadd_q, MATCH_FNMADD_Q, MASK_FNMADD_Q) +DECLARE_INSN(c_nop, MATCH_C_NOP, MASK_C_NOP) +DECLARE_INSN(c_addi16sp, MATCH_C_ADDI16SP, MASK_C_ADDI16SP) +DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR) +DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR) +DECLARE_INSN(c_ebreak, MATCH_C_EBREAK, MASK_C_EBREAK) +DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD) +DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD) +DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW) +DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP) +DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP) +DECLARE_INSN(c_addi4spn, MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN) +DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD) +DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW) +DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW) +DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD) +DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW) +DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW) +DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI) +DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL) +DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI) +DECLARE_INSN(c_lui, MATCH_C_LUI, MASK_C_LUI) +DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI) +DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI) +DECLARE_INSN(c_andi, MATCH_C_ANDI, MASK_C_ANDI) +DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB) +DECLARE_INSN(c_xor, MATCH_C_XOR, MASK_C_XOR) +DECLARE_INSN(c_or, MATCH_C_OR, MASK_C_OR) +DECLARE_INSN(c_and, MATCH_C_AND, MASK_C_AND) +DECLARE_INSN(c_subw, MATCH_C_SUBW, MASK_C_SUBW) +DECLARE_INSN(c_addw, MATCH_C_ADDW, MASK_C_ADDW) +DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J) +DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ) +DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ) +DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI) +DECLARE_INSN(c_fldsp, MATCH_C_FLDSP, MASK_C_FLDSP) +DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP) +DECLARE_INSN(c_flwsp, MATCH_C_FLWSP, MASK_C_FLWSP) +DECLARE_INSN(c_mv, MATCH_C_MV, MASK_C_MV) +DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD) +DECLARE_INSN(c_fsdsp, MATCH_C_FSDSP, MASK_C_FSDSP) +DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP) +DECLARE_INSN(c_fswsp, MATCH_C_FSWSP, MASK_C_FSWSP) +DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0) +DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1) +DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2) +DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD) +DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1) +DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2) +DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1) +DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1) +DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2) +DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD) +DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1) 
+DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2) +DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2) +DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1) +DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2) +DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD) +DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1) +DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2) +DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3) +DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1) +DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2) +DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD) +DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1) +DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2) +#endif +#ifdef DECLARE_CSR +DECLARE_CSR(fflags, CSR_FFLAGS) +DECLARE_CSR(frm, CSR_FRM) +DECLARE_CSR(fcsr, CSR_FCSR) +DECLARE_CSR(cycle, CSR_CYCLE) +DECLARE_CSR(time, CSR_TIME) +DECLARE_CSR(instret, CSR_INSTRET) +DECLARE_CSR(hpmcounter3, CSR_HPMCOUNTER3) +DECLARE_CSR(hpmcounter4, CSR_HPMCOUNTER4) +DECLARE_CSR(hpmcounter5, CSR_HPMCOUNTER5) +DECLARE_CSR(hpmcounter6, CSR_HPMCOUNTER6) +DECLARE_CSR(hpmcounter7, CSR_HPMCOUNTER7) +DECLARE_CSR(hpmcounter8, CSR_HPMCOUNTER8) +DECLARE_CSR(hpmcounter9, CSR_HPMCOUNTER9) +DECLARE_CSR(hpmcounter10, CSR_HPMCOUNTER10) +DECLARE_CSR(hpmcounter11, CSR_HPMCOUNTER11) +DECLARE_CSR(hpmcounter12, CSR_HPMCOUNTER12) +DECLARE_CSR(hpmcounter13, CSR_HPMCOUNTER13) +DECLARE_CSR(hpmcounter14, CSR_HPMCOUNTER14) +DECLARE_CSR(hpmcounter15, CSR_HPMCOUNTER15) +DECLARE_CSR(hpmcounter16, CSR_HPMCOUNTER16) +DECLARE_CSR(hpmcounter17, CSR_HPMCOUNTER17) +DECLARE_CSR(hpmcounter18, CSR_HPMCOUNTER18) +DECLARE_CSR(hpmcounter19, CSR_HPMCOUNTER19) +DECLARE_CSR(hpmcounter20, CSR_HPMCOUNTER20) +DECLARE_CSR(hpmcounter21, CSR_HPMCOUNTER21) +DECLARE_CSR(hpmcounter22, CSR_HPMCOUNTER22) +DECLARE_CSR(hpmcounter23, CSR_HPMCOUNTER23) +DECLARE_CSR(hpmcounter24, CSR_HPMCOUNTER24) +DECLARE_CSR(hpmcounter25, CSR_HPMCOUNTER25) +DECLARE_CSR(hpmcounter26, CSR_HPMCOUNTER26) +DECLARE_CSR(hpmcounter27, CSR_HPMCOUNTER27) +DECLARE_CSR(hpmcounter28, CSR_HPMCOUNTER28) +DECLARE_CSR(hpmcounter29, CSR_HPMCOUNTER29) +DECLARE_CSR(hpmcounter30, CSR_HPMCOUNTER30) +DECLARE_CSR(hpmcounter31, CSR_HPMCOUNTER31) +DECLARE_CSR(sstatus, CSR_SSTATUS) +DECLARE_CSR(sie, CSR_SIE) +DECLARE_CSR(stvec, CSR_STVEC) +DECLARE_CSR(scounteren, CSR_SCOUNTEREN) +DECLARE_CSR(sscratch, CSR_SSCRATCH) +DECLARE_CSR(sepc, CSR_SEPC) +DECLARE_CSR(scause, CSR_SCAUSE) +DECLARE_CSR(stval, CSR_STVAL) +DECLARE_CSR(sip, CSR_SIP) +DECLARE_CSR(satp, CSR_SATP) +DECLARE_CSR(mstatus, CSR_MSTATUS) +DECLARE_CSR(misa, CSR_MISA) +DECLARE_CSR(medeleg, CSR_MEDELEG) +DECLARE_CSR(mideleg, CSR_MIDELEG) +DECLARE_CSR(mie, CSR_MIE) +DECLARE_CSR(mtvec, CSR_MTVEC) +DECLARE_CSR(mcounteren, CSR_MCOUNTEREN) +DECLARE_CSR(mscratch, CSR_MSCRATCH) +DECLARE_CSR(mepc, CSR_MEPC) +DECLARE_CSR(mcause, CSR_MCAUSE) +DECLARE_CSR(mtval, CSR_MTVAL) +DECLARE_CSR(mip, CSR_MIP) +DECLARE_CSR(pmpcfg0, CSR_PMPCFG0) +DECLARE_CSR(pmpcfg1, CSR_PMPCFG1) +DECLARE_CSR(pmpcfg2, CSR_PMPCFG2) +DECLARE_CSR(pmpcfg3, CSR_PMPCFG3) +DECLARE_CSR(pmpaddr0, CSR_PMPADDR0) +DECLARE_CSR(pmpaddr1, CSR_PMPADDR1) +DECLARE_CSR(pmpaddr2, CSR_PMPADDR2) +DECLARE_CSR(pmpaddr3, CSR_PMPADDR3) +DECLARE_CSR(pmpaddr4, CSR_PMPADDR4) +DECLARE_CSR(pmpaddr5, CSR_PMPADDR5) +DECLARE_CSR(pmpaddr6, CSR_PMPADDR6) +DECLARE_CSR(pmpaddr7, 
CSR_PMPADDR7) +DECLARE_CSR(pmpaddr8, CSR_PMPADDR8) +DECLARE_CSR(pmpaddr9, CSR_PMPADDR9) +DECLARE_CSR(pmpaddr10, CSR_PMPADDR10) +DECLARE_CSR(pmpaddr11, CSR_PMPADDR11) +DECLARE_CSR(pmpaddr12, CSR_PMPADDR12) +DECLARE_CSR(pmpaddr13, CSR_PMPADDR13) +DECLARE_CSR(pmpaddr14, CSR_PMPADDR14) +DECLARE_CSR(pmpaddr15, CSR_PMPADDR15) +DECLARE_CSR(tselect, CSR_TSELECT) +DECLARE_CSR(tdata1, CSR_TDATA1) +DECLARE_CSR(tdata2, CSR_TDATA2) +DECLARE_CSR(tdata3, CSR_TDATA3) +DECLARE_CSR(dcsr, CSR_DCSR) +DECLARE_CSR(dpc, CSR_DPC) +DECLARE_CSR(dscratch, CSR_DSCRATCH) +DECLARE_CSR(mcycle, CSR_MCYCLE) +DECLARE_CSR(minstret, CSR_MINSTRET) +DECLARE_CSR(mhpmcounter3, CSR_MHPMCOUNTER3) +DECLARE_CSR(mhpmcounter4, CSR_MHPMCOUNTER4) +DECLARE_CSR(mhpmcounter5, CSR_MHPMCOUNTER5) +DECLARE_CSR(mhpmcounter6, CSR_MHPMCOUNTER6) +DECLARE_CSR(mhpmcounter7, CSR_MHPMCOUNTER7) +DECLARE_CSR(mhpmcounter8, CSR_MHPMCOUNTER8) +DECLARE_CSR(mhpmcounter9, CSR_MHPMCOUNTER9) +DECLARE_CSR(mhpmcounter10, CSR_MHPMCOUNTER10) +DECLARE_CSR(mhpmcounter11, CSR_MHPMCOUNTER11) +DECLARE_CSR(mhpmcounter12, CSR_MHPMCOUNTER12) +DECLARE_CSR(mhpmcounter13, CSR_MHPMCOUNTER13) +DECLARE_CSR(mhpmcounter14, CSR_MHPMCOUNTER14) +DECLARE_CSR(mhpmcounter15, CSR_MHPMCOUNTER15) +DECLARE_CSR(mhpmcounter16, CSR_MHPMCOUNTER16) +DECLARE_CSR(mhpmcounter17, CSR_MHPMCOUNTER17) +DECLARE_CSR(mhpmcounter18, CSR_MHPMCOUNTER18) +DECLARE_CSR(mhpmcounter19, CSR_MHPMCOUNTER19) +DECLARE_CSR(mhpmcounter20, CSR_MHPMCOUNTER20) +DECLARE_CSR(mhpmcounter21, CSR_MHPMCOUNTER21) +DECLARE_CSR(mhpmcounter22, CSR_MHPMCOUNTER22) +DECLARE_CSR(mhpmcounter23, CSR_MHPMCOUNTER23) +DECLARE_CSR(mhpmcounter24, CSR_MHPMCOUNTER24) +DECLARE_CSR(mhpmcounter25, CSR_MHPMCOUNTER25) +DECLARE_CSR(mhpmcounter26, CSR_MHPMCOUNTER26) +DECLARE_CSR(mhpmcounter27, CSR_MHPMCOUNTER27) +DECLARE_CSR(mhpmcounter28, CSR_MHPMCOUNTER28) +DECLARE_CSR(mhpmcounter29, CSR_MHPMCOUNTER29) +DECLARE_CSR(mhpmcounter30, CSR_MHPMCOUNTER30) +DECLARE_CSR(mhpmcounter31, CSR_MHPMCOUNTER31) +DECLARE_CSR(mhpmevent3, CSR_MHPMEVENT3) +DECLARE_CSR(mhpmevent4, CSR_MHPMEVENT4) +DECLARE_CSR(mhpmevent5, CSR_MHPMEVENT5) +DECLARE_CSR(mhpmevent6, CSR_MHPMEVENT6) +DECLARE_CSR(mhpmevent7, CSR_MHPMEVENT7) +DECLARE_CSR(mhpmevent8, CSR_MHPMEVENT8) +DECLARE_CSR(mhpmevent9, CSR_MHPMEVENT9) +DECLARE_CSR(mhpmevent10, CSR_MHPMEVENT10) +DECLARE_CSR(mhpmevent11, CSR_MHPMEVENT11) +DECLARE_CSR(mhpmevent12, CSR_MHPMEVENT12) +DECLARE_CSR(mhpmevent13, CSR_MHPMEVENT13) +DECLARE_CSR(mhpmevent14, CSR_MHPMEVENT14) +DECLARE_CSR(mhpmevent15, CSR_MHPMEVENT15) +DECLARE_CSR(mhpmevent16, CSR_MHPMEVENT16) +DECLARE_CSR(mhpmevent17, CSR_MHPMEVENT17) +DECLARE_CSR(mhpmevent18, CSR_MHPMEVENT18) +DECLARE_CSR(mhpmevent19, CSR_MHPMEVENT19) +DECLARE_CSR(mhpmevent20, CSR_MHPMEVENT20) +DECLARE_CSR(mhpmevent21, CSR_MHPMEVENT21) +DECLARE_CSR(mhpmevent22, CSR_MHPMEVENT22) +DECLARE_CSR(mhpmevent23, CSR_MHPMEVENT23) +DECLARE_CSR(mhpmevent24, CSR_MHPMEVENT24) +DECLARE_CSR(mhpmevent25, CSR_MHPMEVENT25) +DECLARE_CSR(mhpmevent26, CSR_MHPMEVENT26) +DECLARE_CSR(mhpmevent27, CSR_MHPMEVENT27) +DECLARE_CSR(mhpmevent28, CSR_MHPMEVENT28) +DECLARE_CSR(mhpmevent29, CSR_MHPMEVENT29) +DECLARE_CSR(mhpmevent30, CSR_MHPMEVENT30) +DECLARE_CSR(mhpmevent31, CSR_MHPMEVENT31) +DECLARE_CSR(mvendorid, CSR_MVENDORID) +DECLARE_CSR(marchid, CSR_MARCHID) +DECLARE_CSR(mimpid, CSR_MIMPID) +DECLARE_CSR(mhartid, CSR_MHARTID) +DECLARE_CSR(cycleh, CSR_CYCLEH) +DECLARE_CSR(timeh, CSR_TIMEH) +DECLARE_CSR(instreth, CSR_INSTRETH) +DECLARE_CSR(hpmcounter3h, CSR_HPMCOUNTER3H) +DECLARE_CSR(hpmcounter4h, CSR_HPMCOUNTER4H) 
+DECLARE_CSR(hpmcounter5h, CSR_HPMCOUNTER5H) +DECLARE_CSR(hpmcounter6h, CSR_HPMCOUNTER6H) +DECLARE_CSR(hpmcounter7h, CSR_HPMCOUNTER7H) +DECLARE_CSR(hpmcounter8h, CSR_HPMCOUNTER8H) +DECLARE_CSR(hpmcounter9h, CSR_HPMCOUNTER9H) +DECLARE_CSR(hpmcounter10h, CSR_HPMCOUNTER10H) +DECLARE_CSR(hpmcounter11h, CSR_HPMCOUNTER11H) +DECLARE_CSR(hpmcounter12h, CSR_HPMCOUNTER12H) +DECLARE_CSR(hpmcounter13h, CSR_HPMCOUNTER13H) +DECLARE_CSR(hpmcounter14h, CSR_HPMCOUNTER14H) +DECLARE_CSR(hpmcounter15h, CSR_HPMCOUNTER15H) +DECLARE_CSR(hpmcounter16h, CSR_HPMCOUNTER16H) +DECLARE_CSR(hpmcounter17h, CSR_HPMCOUNTER17H) +DECLARE_CSR(hpmcounter18h, CSR_HPMCOUNTER18H) +DECLARE_CSR(hpmcounter19h, CSR_HPMCOUNTER19H) +DECLARE_CSR(hpmcounter20h, CSR_HPMCOUNTER20H) +DECLARE_CSR(hpmcounter21h, CSR_HPMCOUNTER21H) +DECLARE_CSR(hpmcounter22h, CSR_HPMCOUNTER22H) +DECLARE_CSR(hpmcounter23h, CSR_HPMCOUNTER23H) +DECLARE_CSR(hpmcounter24h, CSR_HPMCOUNTER24H) +DECLARE_CSR(hpmcounter25h, CSR_HPMCOUNTER25H) +DECLARE_CSR(hpmcounter26h, CSR_HPMCOUNTER26H) +DECLARE_CSR(hpmcounter27h, CSR_HPMCOUNTER27H) +DECLARE_CSR(hpmcounter28h, CSR_HPMCOUNTER28H) +DECLARE_CSR(hpmcounter29h, CSR_HPMCOUNTER29H) +DECLARE_CSR(hpmcounter30h, CSR_HPMCOUNTER30H) +DECLARE_CSR(hpmcounter31h, CSR_HPMCOUNTER31H) +DECLARE_CSR(mcycleh, CSR_MCYCLEH) +DECLARE_CSR(minstreth, CSR_MINSTRETH) +DECLARE_CSR(mhpmcounter3h, CSR_MHPMCOUNTER3H) +DECLARE_CSR(mhpmcounter4h, CSR_MHPMCOUNTER4H) +DECLARE_CSR(mhpmcounter5h, CSR_MHPMCOUNTER5H) +DECLARE_CSR(mhpmcounter6h, CSR_MHPMCOUNTER6H) +DECLARE_CSR(mhpmcounter7h, CSR_MHPMCOUNTER7H) +DECLARE_CSR(mhpmcounter8h, CSR_MHPMCOUNTER8H) +DECLARE_CSR(mhpmcounter9h, CSR_MHPMCOUNTER9H) +DECLARE_CSR(mhpmcounter10h, CSR_MHPMCOUNTER10H) +DECLARE_CSR(mhpmcounter11h, CSR_MHPMCOUNTER11H) +DECLARE_CSR(mhpmcounter12h, CSR_MHPMCOUNTER12H) +DECLARE_CSR(mhpmcounter13h, CSR_MHPMCOUNTER13H) +DECLARE_CSR(mhpmcounter14h, CSR_MHPMCOUNTER14H) +DECLARE_CSR(mhpmcounter15h, CSR_MHPMCOUNTER15H) +DECLARE_CSR(mhpmcounter16h, CSR_MHPMCOUNTER16H) +DECLARE_CSR(mhpmcounter17h, CSR_MHPMCOUNTER17H) +DECLARE_CSR(mhpmcounter18h, CSR_MHPMCOUNTER18H) +DECLARE_CSR(mhpmcounter19h, CSR_MHPMCOUNTER19H) +DECLARE_CSR(mhpmcounter20h, CSR_MHPMCOUNTER20H) +DECLARE_CSR(mhpmcounter21h, CSR_MHPMCOUNTER21H) +DECLARE_CSR(mhpmcounter22h, CSR_MHPMCOUNTER22H) +DECLARE_CSR(mhpmcounter23h, CSR_MHPMCOUNTER23H) +DECLARE_CSR(mhpmcounter24h, CSR_MHPMCOUNTER24H) +DECLARE_CSR(mhpmcounter25h, CSR_MHPMCOUNTER25H) +DECLARE_CSR(mhpmcounter26h, CSR_MHPMCOUNTER26H) +DECLARE_CSR(mhpmcounter27h, CSR_MHPMCOUNTER27H) +DECLARE_CSR(mhpmcounter28h, CSR_MHPMCOUNTER28H) +DECLARE_CSR(mhpmcounter29h, CSR_MHPMCOUNTER29H) +DECLARE_CSR(mhpmcounter30h, CSR_MHPMCOUNTER30H) +DECLARE_CSR(mhpmcounter31h, CSR_MHPMCOUNTER31H) +#endif +#ifdef DECLARE_CAUSE +DECLARE_CAUSE("misaligned fetch", CAUSE_MISALIGNED_FETCH) +DECLARE_CAUSE("fetch access", CAUSE_FETCH_ACCESS) +DECLARE_CAUSE("illegal instruction", CAUSE_ILLEGAL_INSTRUCTION) +DECLARE_CAUSE("breakpoint", CAUSE_BREAKPOINT) +DECLARE_CAUSE("misaligned load", CAUSE_MISALIGNED_LOAD) +DECLARE_CAUSE("load access", CAUSE_LOAD_ACCESS) +DECLARE_CAUSE("misaligned store", CAUSE_MISALIGNED_STORE) +DECLARE_CAUSE("store access", CAUSE_STORE_ACCESS) +DECLARE_CAUSE("user_ecall", CAUSE_USER_ECALL) +DECLARE_CAUSE("supervisor_ecall", CAUSE_SUPERVISOR_ECALL) +DECLARE_CAUSE("hypervisor_ecall", CAUSE_HYPERVISOR_ECALL) +DECLARE_CAUSE("machine_ecall", CAUSE_MACHINE_ECALL) +DECLARE_CAUSE("fetch page fault", CAUSE_FETCH_PAGE_FAULT) +DECLARE_CAUSE("load page fault", CAUSE_LOAD_PAGE_FAULT) 
+DECLARE_CAUSE("store page fault", CAUSE_STORE_PAGE_FAULT) +#endif diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile new file mode 100644 index 00000000..839a68e5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/Makefile @@ -0,0 +1,17 @@ + +TOT = .. +gengen = $(TOT)/gengen_tool/gengen + +default: + rm -f test_*.h + $(gengen) -i test_pmp_ok_1.cc_skel --file-name test_pmp_ok_1.h --gen-name pmp_ok_1 + $(gengen) -i test_pmp_csr_1.cc_skel --file-name test_pmp_csr_1.h --gen-name pmp_csr_1 + $(gengen) -i test_pmp_ok_share_1.cc_skel --file-name test_pmp_ok_share_1.h --gen-name pmp_ok_share_1 + +gen: + -rm -rf outputs; mkdir outputs + g++ -g3 -O2 gen_pmp_test.cc -o a.out + ./a.out + +clean: + rm -rf test_*.h a.out outputs/* diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc new file mode 100644 index 00000000..e464aa1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/gen_pmp_test.cc @@ -0,0 +1,379 @@ +/* + * insn_utest-1.cc + * + * Created on: Mar.6 2020 + * Author: soberl + */ +#include +#include +#include +#include +#include + +#include +#include + +#include "test_pmp_ok_1.h" +#include "test_pmp_ok_share_1.h" +#include "test_pmp_csr_1.h" + +#define GEN_ALL 1 + +namespace { + +const unsigned expected_files_count[] = { + 256 - 64, + 528, + 24, + 0 +}; + + std::ostringstream str_buffer, val_buffer; + std::ofstream m_ofstream; + unsigned cur_files_count = 0; + unsigned cur_expected_errors = 0; + + const int max_pmp = 16; // from spike + const int max_pmp_cfg = max_pmp / 8; // for RV64 +}; + + + +int +main() +{ +#if GEN_ALL + pmp_ok_1_gen_class gen_class_1; + + for (int u_mode = 0; u_mode < 2; u_mode++) { + for (int r = 0; r < 2; r++) { + for (int w = 0; w < 2; w++) { + for (int x = 0; x < 2; x++) { + for (int cfgl = 0; cfgl < 2; cfgl++) { + for (int pmp_match = 0; pmp_match < 2; pmp_match++) { + for (int mmwp = 0; mmwp < 2; mmwp++) { + for (int mml = 0; mml < 2; mml++) { + /* + * For RW=01, + * - mml == 1, test in pmp_ok_share_1 + * - mml == 0, reserved. + */ + if (r == 0 && w == 1) continue; + + str_buffer.str(""); + str_buffer << "outputs/test_pmp_ok_1_u" << u_mode << "_rw" << r << w << "_x" << x << "_l" << cfgl + << "_match" << pmp_match << "_mmwp" << mmwp << "_mml" << mml << ".c"; + m_ofstream.open(str_buffer.str().c_str()); + cur_files_count++; + + gen_class_1.set_tag(str_buffer.str()); + + unsigned rw_err = 0; + unsigned x_err = 0; + + gen_class_1.set_switch_u_mode(u_mode); + gen_class_1.set_pmp_r(r); + gen_class_1.set_pmp_w(w); + gen_class_1.set_pmp_x(x); + gen_class_1.set_pmp_l(cfgl); + + if (mml) { + gen_class_1.set_m_mode_rwx(0); + } else { + gen_class_1.set_m_mode_rwx(cur_files_count % 3 == 0 ? 
1 : 0); + } + + gen_class_1.set_set_sec_mmwp(mmwp); + gen_class_1.set_set_sec_mml(mml); + + if (pmp_match) { + gen_class_1.set_create_pmp_cfg(pmp_match); + gen_class_1.set_pmp_addr_offset(0); + if (mml) { + if (cfgl && r && w && x) { // 2nd version, XWRL-MML is shared read-only + rw_err = 1; + x_err = 1; + } else { + if (1 - u_mode != cfgl) { + rw_err = 1; + x_err = 1; + } + if (r == 0 || w == 0) rw_err = 1; + if (x == 0) x_err = 1; + } + } else { + if (u_mode == 1 || cfgl) { + if (r == 0 || w == 0) rw_err = 1; + if (x == 0) x_err = 1; + } + } + } else { + if (cur_files_count % 3 == 0) { + gen_class_1.set_create_pmp_cfg(1); + gen_class_1.set_pmp_addr_offset(0x100); // >= sizeof(.test) section + } else { + gen_class_1.set_create_pmp_cfg(0); + } + if (u_mode == 1 || mmwp) { // mmwp to against non-match + rw_err = 1; + x_err = 1; + } else if (mml == 1) { + x_err = 1; + } + } + + cur_expected_errors += rw_err + x_err; + gen_class_1.set_expected_rw_fail(rw_err); + gen_class_1.set_expected_x_fail(x_err); + + str_buffer.str(""); + gen_class_1.generate_pmp_ok_1(str_buffer, 0); + str_buffer << std::endl; + m_ofstream << str_buffer.str(); + m_ofstream.close(); + } + } + } + } + } + } + } + } +#endif + +#if GEN_ALL + pmp_csr_1_gen_class gen_class_2; + + for (int pmp_lock = 0; pmp_lock < 2; pmp_lock++) { + for (int lock_once = 0; lock_once < 2; lock_once++) { + if (pmp_lock == 1 && lock_once == 1) continue; // locked once anyway + for (int pre_rlb = 0; pre_rlb < 2; pre_rlb++) { + for (int pre_mmwp = 0; pre_mmwp < 2; pre_mmwp++) { + for (int pre_mml = 0; pre_mml < 2; pre_mml++) { + for (int test_pmp = 0; test_pmp < 2; test_pmp++) { + for (int idx = 0; idx < 2; idx++) { + if (test_pmp == 0 && idx == 1) continue; // only 1 seccfg + for (int val = 0; val < 8; val++) { + if (val == 0 && test_pmp) continue; // skip, since no change +#if TEST_RW01_ONLY + if (test_pmp) { + if ((idx == 0 && (val & 0x3) == 0x1) || (idx == 1 && (val & 0x3) == 0x2)) { + // test RW=01; + } else { + continue; + } + } +#endif + + str_buffer.str(""); + str_buffer << "outputs/test_pmp_csr_1_lock" << pmp_lock << lock_once + << "_rlb" << pre_rlb << "_mmwp" << pre_mmwp << "_mml" << pre_mml + << (test_pmp ? "_pmp_" : "_sec_") << idx << val + << ".c"; + m_ofstream.open(str_buffer.str().c_str()); + cur_files_count++; + + gen_class_2.set_tag(str_buffer.str()); + + gen_class_2.set_m_mode_rwx(0); + gen_class_2.set_pmp_lock(pmp_lock); + gen_class_2.set_lock_once(lock_once); + + gen_class_2.set_lock_bypass(pre_rlb); + gen_class_2.set_pre_sec_mml(pre_mml); + gen_class_2.set_pre_sec_mmwp(pre_mmwp); + + gen_class_2.set_group_pmp(test_pmp); + + int pmpcfg_fail = 0; + int pmpaddr_fail = 0; + int seccfg_fail = 0; + + if (test_pmp == 1) { // pmpcfg and pmpaddr test + gen_class_2.set_revert_rwx(val); + + if (idx == 0) { // for cfg2 and cfg3, since PMP_L might set there + int sub_idx = 2 + cur_files_count % 2; + gen_class_2.set_addr_idx(sub_idx); + gen_class_2.set_addr_offset(0); + gen_class_2.set_cfg_idx(0); + gen_class_2.set_cfg_sub_idx(sub_idx); + + if (pmp_lock && !pre_rlb) { + pmpcfg_fail = 1; + pmpaddr_fail = 1; + } else { + // RW=01 is not allowed for MML==0 + if (!pre_mml && (val & 0x3) == 0x1) { // b'11^01 = 10, RW=01 + pmpcfg_fail = 1; + } + + /* + * Adding a rule with executable privileges that either is M-mode-only or a locked Shared-Region + * is not possible and such pmpcfg writes are ignored, leaving pmpcfg unchanged. 
+ */ + bool set_PMP_L = (lock_once != pmp_lock); + unsigned rwx = 7 ^ val; + if (!pre_rlb && pre_mml && set_PMP_L && (rwx != 7) && ((rwx & 0x4) == 0x4 || (rwx & 0x3) == 0x2)) { + pmpcfg_fail = 1; + } + } + } else { // for invalid cfgs, start from 7 + gen_class_2.set_addr_idx(7 + cur_files_count % (max_pmp - 7)); + gen_class_2.set_addr_offset(0x10000); + gen_class_2.set_cfg_idx((1 + cur_files_count % (max_pmp_cfg - 1)) * 2); // for 2, 4, ..., 14 + gen_class_2.set_cfg_sub_idx((cur_files_count >> val) % 4); + if (!pre_mml && (val & 0x3) == 0x2) { // b'00^10 = 10, RW=01 + pmpcfg_fail = 1; + } + /* + * PMP_L cases with default LRWX=0000 + */ + bool set_PMP_L = (lock_once != 0); + unsigned rwx = 0 ^ val; + if (!pre_rlb && pre_mml && set_PMP_L && (rwx != 7) && ((rwx & 0x4) == 0x4 || (rwx & 0x3) == 0x2)) { + pmpcfg_fail = 1; + } + } + + if (pmpcfg_fail || pmpaddr_fail) cur_expected_errors += 1; + } else { // seccfg test + unsigned sec_val = val; + unsigned sec_rlb = (sec_val >> 2) & 0x1; + unsigned sec_mml = (sec_val >> 0) & 0x1; + unsigned sec_mmwp = (sec_val >> 1) & 0x1; + gen_class_2.set_sec_rlb(sec_rlb); + gen_class_2.set_sec_mml(sec_mml); + gen_class_2.set_sec_mmwp(sec_mmwp); + } + + gen_class_2.set_expected_seccfg_fail(seccfg_fail); + gen_class_2.set_expected_pmpaddr_fail(pmpaddr_fail); + gen_class_2.set_expected_pmpcfg_fail(pmpcfg_fail); + + str_buffer.str(""); + gen_class_2.generate_pmp_csr_1(str_buffer, 0); + str_buffer << std::endl; + m_ofstream << str_buffer.str(); + m_ofstream.close(); + } + } + } + } + } + } + } + } +#endif + +#if GEN_ALL + pmp_ok_share_1_gen_class gen_class_3; + for (int r = 0; r < 2; r++) { + for (int x = 0; x < 2; x++) { + for (int cfgl = 0; cfgl < 2; cfgl++) { + for (int typex = 0; typex < 2; typex++) { + for (int umode = 0; umode < 2; umode++) { + // not share mode and M mode + if (r == 1 && umode == 0) continue; + + str_buffer.str(""); + str_buffer << "outputs/test_pmp_ok_share_1_r" << r << "_x" << x << "_cfgl" << cfgl + << "_typex" << typex << "_umode" << umode << ".c"; + m_ofstream.open(str_buffer.str().c_str()); + cur_files_count++; + + gen_class_3.set_tag(str_buffer.str()); + + unsigned r_err = 0; + unsigned w_err = 0; + unsigned x_err = 0; + + gen_class_3.set_pmp_r(r); + gen_class_3.set_pmp_x(x); + gen_class_3.set_pmp_l(cfgl); + gen_class_3.set_typex(typex); + gen_class_3.set_enable_umode_test(umode); + + if (r != 0) { // not share mode + // 2nd version, XWRL-MML is shared read-only + if (x && cfgl) { + if (typex == 0) { + w_err = 1; + } else { + x_err = 1; + } + } else { + if (typex == 0) { + r_err = 1; + w_err = 1; + } else { + x_err = 1; + } + } + } else { + if (cfgl) { + if (typex == 0) { + if (x == 0) { + // no RW access + r_err = 1; + w_err = 1; + } else { + // readable for M mode + if (umode) { + r_err = 1; + w_err = 1; + } else { + w_err = 1; + } + } + } else { + // always executable + } + } else { + if (typex == 0) { + if (x == 0) { + // RW M mode, R for U + if (umode) { + w_err = 1; + } + } + } else { + x_err = 1; // when !cfgl, not executable + } + } + } + + cur_expected_errors += r_err + w_err + x_err; + gen_class_3.set_expected_r_fail(r_err); + gen_class_3.set_expected_w_fail(w_err); + gen_class_3.set_expected_x_fail(x_err); + + str_buffer.str(""); + gen_class_3.generate_pmp_ok_share_1(str_buffer, 0); + str_buffer << std::endl; + m_ofstream << str_buffer.str(); + m_ofstream.close(); + } + } + } + } + } +#endif + +#if GEN_ALL + unsigned expectedCount = 0; + for (int i=0; i> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: 
"r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? 
MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..d883f97a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..9d1edcb6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
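+ * Concretely, the writes below program pmpaddr1..pmpaddr3 with
+ * TEST_MEM_START, TEST_MEM_END and U_MEM_END (each >> PMP_SHIFT) as TOR
+ * boundaries; with M_MODE_RWX == 0, pmpaddr4..pmpaddr6 additionally map the
+ * code/data regions at 0x80000000 rather than a single R/W/X NAPOT entry in
+ * pmpaddr0.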
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..a0fb1a17 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
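+ * Here the test target rewrites pmpaddr2 with a shifted copy of its current
+ * value and toggles the value 4 in byte 2 of pmpcfg0; read-back mismatches
+ * are recorded in actual_pmpaddr_fail / actual_pmpcfg_fail for
+ * checkTestResult() to weigh against the expected_* constants.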
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..7a41bc05 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
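+ * - checkTestResult() reports through the exit code: +1 for an unexpected
+ *   seccfg result, +2 for pmpaddr, +4 for pmpcfg; 0 means every check
+ *   matched its expected_* constant.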
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
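+ * The CSR writes below set pmpaddr1..pmpaddr3 to TEST_MEM_START,
+ * TEST_MEM_END and U_MEM_END (each >> PMP_SHIFT) as TOR limits; when
+ * M_MODE_RWX is 0, pmpaddr4..pmpaddr6 cover the code/data regions at
+ * 0x80000000 in place of one R/W/X NAPOT entry in pmpaddr0.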
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..a3179e91 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
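+ * This variant rewrites pmpaddr2 with a shifted copy of itself, then XORs
+ * the value 6 into byte 2 of pmpcfg0; read-back mismatches set
+ * actual_pmpaddr_fail / actual_pmpcfg_fail and are judged against the
+ * expected_* constants in checkTestResult().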
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..2655a688 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
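+ * - The exit code from checkTestResult() encodes mismatches against the
+ *   expected_* constants: +1 seccfg, +2 pmpaddr, +4 pmpcfg (0 = pass).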
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
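+ * The writes below use pmpaddr1..pmpaddr3 for TEST_MEM_START, TEST_MEM_END
+ * and U_MEM_END (each >> PMP_SHIFT) as TOR boundaries; with M_MODE_RWX == 0,
+ * pmpaddr4..pmpaddr6 map the code/data regions at 0x80000000 instead of a
+ * single R/W/X NAPOT entry in pmpaddr0.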
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..479a5d4e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
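+ * This variant targets an entry in the second bank: it rewrites pmpaddr8
+ * (shifted and offset by 65536), then XORs the value 1 into byte 0 of
+ * pmpcfg2; read-back mismatches latch actual_pmpaddr_fail /
+ * actual_pmpcfg_fail for comparison with the expected_* constants.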
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..aa05f33b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
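+ * - checkTestResult() folds the outcome into the exit code: +1 for seccfg,
+ *   +2 for pmpaddr, +4 for pmpcfg mismatches versus the expected_* values.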
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
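+ * Below, pmpaddr1..pmpaddr3 receive TEST_MEM_START, TEST_MEM_END and
+ * U_MEM_END (each >> PMP_SHIFT) as TOR boundaries; when M_MODE_RWX is 0,
+ * pmpaddr4..pmpaddr6 describe the code/data regions at 0x80000000 rather
+ * than one R/W/X NAPOT entry in pmpaddr0.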
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..218484f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..c5b3341d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..1332f9b8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..a4af689e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..9bb9768a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..a1f8c655 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..4e5a02c7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..8c483335 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..81d3b00f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..c99a9d66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..3b4512b1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..5199b7d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..e5251d87 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr0 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (0 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr0, %1 \n" + "\tcsrr %0, pmpaddr0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr0 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(0 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..b68e7a43 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..733bad9a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..573f06dd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..9407c487 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..b7ebe3f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..1940f3c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..45615800 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..c433b2fb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..59651e35 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..af7fbde8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..e5b59a56 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..49d57ec5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..d2f33b0a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..3e9ea53c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..a575841a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..d167644b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..e98e24c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..44ee2ea2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..23b4f19d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..ca759321 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..bcec5531 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..a9856777 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..a7880143 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..ee4577d1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
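+ *
+ * The block below reads the pmpaddr CSR under test (pmpaddr2 here),
+ * writes a derived value back and re-reads it; a mismatch means the
+ * write was ignored or masked and is recorded in actual_pmpaddr_fail.
+ * The same write/read-back check is then applied to one byte of
+ * pmpcfg0 and recorded in actual_pmpcfg_fail.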
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..dbcd878a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
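+ *
+ * Note: on RV64 the odd-numbered pmpcfg CSRs (pmpcfg1, pmpcfg3, ...) do
+ * not exist, so the cfg1 value computed below is merged into the upper
+ * 32 bits of pmpcfg0 rather than written to a separate CSR.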
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..981f5226 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
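+ *
+ * The pmpcfg check below XOR-toggles low-order bits of a single cfg
+ * byte, writes the result back and compares the read-back value; any
+ * difference is recorded in actual_pmpcfg_fail.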
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..1f8ad6a8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
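+ *
+ * Note: CSR 0x747 written below is mseccfg (Smepmp). Only MSECCFG_MMWP
+ * is set in this test, enabling the machine-mode whitelist policy, under
+ * which M-mode accesses that match no PMP entry are denied.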
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..88491bd9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
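+ *
+ * Any mismatch found below is recorded in actual_pmpaddr_fail or
+ * actual_pmpcfg_fail; checkTestResult() compares these against the
+ * expected_* constants and encodes the differences into the exit code
+ * (adding 1 for seccfg, 2 for pmpaddr and 4 for pmpcfg).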
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..6bc196ed --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
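+ *
+ * Note: PMP_L marks an entry locked; once set it normally cannot be
+ * changed until reset, unless mseccfg.RLB (rule-lock bypass) is set.
+ * The constant-valued conditions in this function are fixed by the test
+ * generator and select whether lock bits are exercised in this variant.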
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..a6cd00c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
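+ *
+ * This variant probes a higher-numbered entry: it rewrites pmpaddr7 with
+ * the read-back value shifted left by one plus 0x10000, and toggles a
+ * byte of pmpcfg2 rather than pmpcfg0.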
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..06a2670c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..90d9f9d2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..2a78d1c3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..e56ef606 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..18ffa1d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..9f797567 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..02b2015d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..6e863fde --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
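+ * Note: 0x747 is the CSR number of mseccfg (the Smepmp machine security
+ * configuration register); it is accessed numerically in this test,
+ * presumably because the assembler in use does not know the symbolic name.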
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..60438cce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
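+ * - The file name appears to encode the generated test vector, e.g. lock00,
+ *   rlb0, mmwp1, mml0 for the PMP lock and mseccfg settings exercised here.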
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
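+ * The pmpaddr CSRs hold a physical address shifted right by PMP_SHIFT (2),
+ * hence the ">> 2" in the writes below; for TOR entries the previous pmpaddr
+ * register supplies the region base.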
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..5ebd6d4b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
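+ * The sequence below writes mseccfg, reads it back, and compares against
+ * expected_val, which keeps only the defined RLB/MML/MMWP bits and models
+ * which of them this particular write is actually allowed to change.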
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..0a304bcc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
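+ * - Conditions such as "if (0)" / "if (1)" are constants substituted by the
+ *   generator; the dead branches are kept so every variant shares one skeleton.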
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
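+ * On RV64 each pmpcfg CSR packs eight 8-bit pmpNcfg fields, so cfg1 (entries
+ * 4..7) is merged into the upper half of cfg0 with a 32-bit shift below; on
+ * RV32 it is written to pmpcfg1 instead.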
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..bd160f8b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
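+ * MML and MMWP are sticky: once set they cannot be cleared again, which is
+ * why expected_val below keeps MSECCFG_MMWP set although the write leaves
+ * that bit at 0.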
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..ce84df30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
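+ * - checkTestResult() folds mismatches into the exit code: +1 for seccfg,
+ *   +2 for pmpaddr, +4 for pmpcfg; an exit code of 0 means the test passed.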
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
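+ * MSECCFG_RLB (rule locking bypass) was set at the top of set_cfg() so that
+ * any entries already locked at reset do not block this reconfiguration.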
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..331bb3a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
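+ * A locked PMP entry exists by the time mseccfg is written below, so the
+ * attempt to set RLB is expected to be ignored; expected_val therefore drops
+ * MSECCFG_RLB again.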
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..e612d663 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
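+ * - The *_pmp_* variants appear to exercise the pmpaddr/pmpcfg write and
+ *   read-back path, while the *_sec_* variants exercise the mseccfg path.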
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
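+ * This mml1 variant also sets PMP_L on the M-mode code/data TOR entries
+ * (the "need to set L bit" branch below), since with MML set M mode may
+ * only execute from locked regions.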
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..cddb2aef --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..25f810cf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..4dbde5ff --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..931ddd4a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..d0f64cba --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..a4b1d42b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..90549a1a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..f08995c5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..b9a065a1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..b4ec9369 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..d0f317ff --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..4fc45e67 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..f001df23 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..f64da1b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..148f7eed --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..15475f10 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..26ecaa03 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..87211001 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..cf78bf42 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..b95dfad4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..e3969778 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb0_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..29d55dc0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..5329e8b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..d3a90ed6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..3a875e71 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..825b1471 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..12ad98d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..e0f4e4e5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..65a01f1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..28fade41 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..e85524c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..3f21aa8f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..5c5b9e13 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..d3becdd0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..46d1e698 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..5f54b2e5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..4450d702 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..d63b1380 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..8169d4c1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..8e8825c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..d67039c9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..34783232 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..39cee672 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..411cc559 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..507f56cb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..57f0db54 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..22805ab4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..1bb4c3bb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..c6c6022e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..6206f2b5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..872cf9c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
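+ * In the generated variants in this patch the handler does not resume at the
+ * next instruction; any trap taken during the test ends the simulation with
+ * tohost_exit(1337) and is therefore an immediate failure.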
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
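+ *
+ * The block below writes a new value to the pmpaddr/pmpcfg CSR under test and
+ * immediately reads it back; the generated expectations
+ * (expected_pmpaddr_fail == 0, expected_pmpcfg_fail == 0) assume the write
+ * takes effect, so any read-back mismatch is recorded as a failure.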
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..e3dd7e1f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
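+ * - The rlb1/mmwp0/mml1 suffixes in the file name appear to mirror the
+ *   mseccfg value the test programs (RLB and MML set, MMWP clear), and the
+ *   trailing pmp_NN appears to select which PMP entry is exercised.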
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
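+ *
+ * On RV64, pmpcfg0 packs pmp0cfg..pmp7cfg one byte each, so the code below
+ * positions each entry's configuration with byte shifts (e.g. sub_cfg << 16
+ * for pmp2cfg and sub_cfg << 24 for pmp3cfg).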
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..f2e81a1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..d2f38a7f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
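+ * - mseccfg is accessed by its CSR number (0x747) in the inline assembly;
+ *   MSECCFG_RLB/MML/MMWP below are masks for its individual bits.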
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..320bdccf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..4dcc75ea --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
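+ * - checkTestResult() encodes any mismatch in the exit code: bit 0 for the
+ *   mseccfg check, bit 1 for pmpaddr, bit 2 for pmpcfg; 0 means the test
+ *   passed.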
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..71bbe73a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
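+ *
+ * Note that pmpcfg2 is re-read first and only the byte belonging to the entry
+ * under test is XOR-flipped, so the configuration of the other entries in
+ * that register is written back unchanged.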
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..c23dd2d9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
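+ * - Unlike the pmp_NN variants above, this _sec_00 variant compiles the
+ *   mseccfg branch of the test target (the pmpaddr/pmpcfg block is under
+ *   "#if 0"), so it checks the value read back from mseccfg instead.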
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
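+ *
+ * Illustration (standard PMP TOR encoding, not generator output): a TOR
+ * entry i matches addresses with pmpaddr[i-1] <= (addr >> 2) < pmpaddr[i],
+ * i.e. pmpaddr holds addr >> 2, so TEST_MEM 0x200000..0x240000 becomes the
+ * pair pmpaddr1 = 0x80000, pmpaddr2 = 0x90000 written just below.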
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..1e2be0a8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..4b701ff5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..66245667 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..dd24a817 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..0b2c3371 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..c1bfabc1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..3d83babf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..563d15b0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..dd3aa46a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..9843b909 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..8bf13de8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..2c685349 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..c0ab076a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..a864200b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..f7d6f4f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..8bfc7600 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..c2ee48f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..ac59085a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..d976ff06 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..84b826cd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..d3a333c4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..656fd426 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..ef870350 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..8c3380e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..7289041c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..de5d5576 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..69f4a64d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..45802766 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..cb0a4693 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
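+ * (0x747 below is the mseccfg CSR, written by number; the MSECCFG_*
+ * masks defined above select its MML/MMWP/RLB bits.)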
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..3bc4e6f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
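+ * - The exit code encodes failures: +1 for an mseccfg mismatch, +2 for a
+ *   pmpaddr mismatch, +4 for a pmpcfg mismatch (see checkTestResult()).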
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
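+ * Note: RV64 has no odd-numbered pmpcfg CSRs, so the pmp4..6 configuration
+ * assembled in cfg1 is folded into the upper 32 bits of pmpcfg0 below.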
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..d5d1116b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
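+ * The check below writes a new value to the selected pmpaddr/pmpcfg entry,
+ * reads it back, and records any mismatch in the actual_*_fail flags for
+ * comparison against the expected_*_fail constants.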
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..bce4fcf7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
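+ * Note: with mseccfg.MML set (as in this test), M-mode instruction fetch
+ * is expected to require a locked (PMP_L) rule, which is why the code
+ * region's cfg byte gets PMP_L below.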
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..b85dc3a9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
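+ * For the TOR entry exercised below, the new pmpaddr value is the old one
+ * shifted left by one, giving a different but still valid address; an
+ * index-0 NAPOT entry would have its mask extended instead.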
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..b312c82d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
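+ * Note: mseccfg.RLB (rule-locking bypass) remains set for this test, so
+ * even locked entries stay writable and every expected_*_fail constant
+ * is 0.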
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..39489ad1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
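+ * In the pmpcfg check below, the written value is the current pmpcfg0 with
+ * only the selected entry's byte XOR-flipped, so the remaining entries keep
+ * their configuration.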
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..8346afc9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..210759b3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..ab1fc641 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..42b50320 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..a821b075 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..857ef934 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..2d366726 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..5bcd2d21 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..da2a73dd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..edad03bf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..7c46885b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..5b53aa93 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..bd359d1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..99b66de7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..138b957d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..d418b0f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock00_rlb1_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
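+ * Sketch of the check below, assuming Smepmp v1.0 semantics: CSR 0x747 is
+ * mseccfg. A new RLB/MML/MMWP combination is written and read back, and
+ * expected_val models the architectural behaviour -- MML and MMWP are sticky
+ * (once set they stay set), and an attempt to set RLB is ignored when RLB is
+ * currently clear and some pmpcfg entry is already locked.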
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..1a3fe5de --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
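+ * - Result reporting: checkTestResult() exits with a small bitmask -- +1 for a
+ *   seccfg mismatch, +2 for a pmpaddr mismatch, +4 for a pmpcfg mismatch -- so
+ *   an exit code of 0 means every observed result matched its expected_* flag.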
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..5f3a3917 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
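+ * Sketch of the probe below: read the target pmpaddr, derive a new value that
+ * is still a sensible encoding for either NAPOT or TOR (entry 0 extends the
+ * NAPOT mask via ((rval + 1) << 1) - 1, other entries shift the boundary up by
+ * one bit), write it and read it back; then flip a few bits of one pmpcfg byte
+ * (XOR, so only the targeted entry changes) and read that back as well. Whether
+ * each write is expected to stick is encoded in expected_pmpaddr_fail and
+ * expected_pmpcfg_fail at the top of this file.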
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..f6f5d9e1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
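+ * - The raw CSR number 0x747 used in the asm below is mseccfg (Smepmp); it is
+ *   written by number rather than by name, presumably so the file also builds
+ *   with toolchains that do not yet know that CSR.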
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..84b144e4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..6201ad12 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..7d3406d3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..4771dde3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..23806b44 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..0532f351 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..7f20ce63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..1c1f1824 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..670a256f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..407e45f2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..bf70f329 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..235472e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..9a71c8fc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..690210ea --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..93c09f19 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..35f7cfc3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..56624f47 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..088cbbf9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..b5b056ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..640439a8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..1f81f74b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..6411eef3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..28406299 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..a3eefe6c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..0ae5d5fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..75298acd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..6ef040d9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..792e5be4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..bb799cd4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..06f3e202 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..3901f359 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..6aea5f72 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..e1ef95f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..4a57dd41 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..0799e19d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..b64999c2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..6fe87896 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..9e3cdce7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..3dca07a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..c377ed39 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
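+ * - The mseccfg CSR is written and read by its number (0x747) in the asm below.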
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
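+ * pmpaddr1..pmpaddr3 form TOR boundaries: entry 2 covers
+ * [TEST_MEM_START, TEST_MEM_END) and entry 3 covers [TEST_MEM_END, U_MEM_END);
+ * pmpaddr values are byte addresses shifted right by 2.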
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..dead666e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
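+ * With PMP entries still locked and mseccfg.RLB currently 0, the RLB bit
+ * written below is expected to read back as 0 (see the expected_val logic).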
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..2419a701 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
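+ * - This variant exercises the pmpaddr/pmpcfg write and read-back path
+ *   rather than the mseccfg write path.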
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..64b4639c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
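+ * The block below rewrites pmpaddr2, toggles bits in the pmpcfg0 byte for
+ * entry 2, and compares each read-back against the value written.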
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..2e9435e3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
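+ * - checkTestResult() reports mismatches through the exit code:
+ *   bit 0 for seccfg, bit 1 for pmpaddr, bit 2 for pmpcfg.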
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
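+ * With M_MODE_RWX == 0, pmpaddr4/5 plus cfg1 make entry 5 an execute-only TOR
+ * region over the M-mode code at 0x80000000..0x80010000, and entry 6 is set
+ * up for M-mode data (per the inline comments below).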
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..53908ac9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
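+ * The pmpaddr rewrite below shifts the old value left by one to obtain a
+ * distinct value; the NAPOT branch of the generated code is unused here since
+ * the entry index is non-zero.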
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..b8ccc166 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
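+ * - Unexpected traps land in the handle_trap() override below, which exits
+ *   via tohost_exit(1337).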
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
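+ * sub_cfg also sets PMP_L on entries 2 and 3; the csrc on pmpcfg0 further
+ * below clears those lock bits again while MSECCFG_RLB is still set.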
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..3021c532 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..b570f859 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..12cf707e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..fe98ae6e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..ac6a65ae --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..7d474786 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..acf2fc81 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..2774b7a7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
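+ * (pmpaddrN CSRs hold a physical address right-shifted by 2, so the TOR
+ *  boundaries below are written as `address >> 2`.)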
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..2c11c45e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..0013abb9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
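+ * (On RV64 each pmpcfg CSR packs eight 8-bit pmpNcfg fields, which is why
+ *  entries are selected with shifts of 8/16/24 below and cfg1 is merged into
+ *  the upper half of cfg0; on RV32 cfg1 is written to pmpcfg1 instead.)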
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..cfad8e07 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..f3007990 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..3e0fce4a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..dcd67a37 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
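+ * (A pmpNcfg entry with PMP_L set is locked against further writes until
+ *  reset, unless mseccfg.RLB permits modifying locked rules.)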
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..6fffea06 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..b194b9aa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..49715e09 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..913585f5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..060a32e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..f01fb95d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..869215fd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..ab46071f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..bd985462 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..aafb4817 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..3fb53d5d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..4347f200 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..7104c250 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..721ceab4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..902577d4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..7b2215c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..34eafc0f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..19bf539f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..dcffef51 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..b4b6caad --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..e16eb222 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..2b1f1759 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..15e8ad02 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..51eea2e8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..8ae18f63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb0_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..33384ed4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..9eb33c66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..99c397ad --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..3dd63a4c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..7973db03 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..eeed0e91 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to next instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid being locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..5b22d970 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..f7aef7be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to next instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid being locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..f123cb56 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..be46b6a9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to next instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid being locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share a PMP entry. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..c7bfa921 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..11d9108d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..d01fd8d3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..516ece99 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..073963f4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..51e9862f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..8128eeb5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..183024a3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..bfb7f15c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..b1583015 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..80478615 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..2d365a77 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..fbc2988b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..c0d1b08b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..ed94ce66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..a9ba9dd8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..d6b7079e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..39120ed2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..f79df0b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..34d897ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..061aefe9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..8f22b9a0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..37409fcf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..b54a5f95 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
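+ * The block below writes one pmpaddr/pmpcfg CSR, reads it back, and records
+ * any mismatch in actual_pmpaddr_fail / actual_pmpcfg_fail; checkTestResult()
+ * then compares these flags against the generated expected_* constants.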
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..3a94ebaf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..09e19aaa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..5ec0d311 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
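+ * With A=TOR, entry i covers pmpaddr[i-1]<<2 up to pmpaddr[i]<<2, so the
+ * pmp2cfg rule guards [TEST_MEM_START, TEST_MEM_END) with pmpaddr1 as its base.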
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..215f7d50 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..15e6a992 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
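+ * When M_MODE_RWX is 0, separate TOR entries (pmpaddr4..pmpaddr6) cover the
+ * M-mode code and data regions; with mseccfg.MML set, M mode may only execute
+ * from regions matched by a locked (L=1) rule, hence cfg1 sets PMP_L for them.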
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..4abf2514 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
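+ * For the seccfg path below: per Smepmp, mseccfg.MML and mseccfg.MMWP are
+ * sticky (set-only), and a write setting RLB is ignored while RLB is clear and
+ * any PMP entry is locked; expected_val models this for the read-back check.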
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..0f0ae1ac --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..2e14dacd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..fd257a1c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..bb901149 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..c2d3ef84 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..8007ef97 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..b25abf8a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..6648b0bc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
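+   * In outline, the check below reads pmpaddr2, writes a shifted copy back and
+   * compares the read-back value, then XORs a few bits into byte 2 (entry 2) of
+   * pmpcfg0 and repeats the write/read-back comparison. All expected_*_fail
+   * flags above are 0 for this case, so each write is expected to take effect.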
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..aa96209d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
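+ * - mseccfg here refers to the machine security configuration CSR (number 0x747
+ *   in the Smepmp proposal); this variant sets RLB and MMWP and leaves MML clear,
+ *   matching the rlb1_mmwp1_mml0 part of the file name.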
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
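+   * The ">> 2" in the writes below comes from the PMP address encoding: each
+   * pmpaddrN CSR holds bits [XLEN+1:2] of the physical address (see PMP_SHIFT).
+   * As a worked example, in the M_MODE_RWX variant the single NAPOT entry would
+   * be pmpaddr0 = (TEST_MEM_START >> 3) - 1 = 0x3ffff, i.e. the 2 MiB range
+   * [0, 0x200000). (Assuming the standard RISC-V PMP encoding.)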
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..a520e1a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
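+   * In short, the check below reads pmpaddr2, writes back a shifted copy and
+   * compares the read-back, then flips bits in byte 2 (entry 2) of pmpcfg0 and
+   * checks that read-back as well. The expected_*_fail flags above are all 0,
+   * so both writes are expected to land unchanged.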
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..908a03d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
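+   * Note: pmpaddrN holds the address shifted right by PMP_SHIFT (2), hence the
+   * ">> 2" below. In the !M_MODE_RWX path, pmpaddr4..6 act as TOR boundaries
+   * for the M-mode code and data windows (see the inline comments), with their
+   * cfg bytes collected in cfg1.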
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..89c733fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
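+   * Outline of the check below: read pmpaddr10, write back a shifted value
+   * (with an extra offset) and compare the read-back, then XOR bits into byte 2
+   * of pmpcfg2 and compare again. All expected_*_fail flags above are 0, so the
+   * writes are expected to take effect.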
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..956acb51 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
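+ * - The CSR written as 0x747 below is mseccfg (Smepmp); this case runs with RLB
+ *   and MMWP set and MML clear, as the rlb1_mmwp1_mml0 name suggests.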
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
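+   * Reminder on the encoding assumed here: a pmpaddr register stores the target
+   * address divided by 4 (the low PMP_SHIFT bits dropped), which is why every
+   * address below is written as "addr >> 2".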
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..169259a1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
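+   * Roughly, the code below reads pmpaddr12, writes a shifted value back and
+   * compares the read-back, then XORs bits into the low byte of pmpcfg2 and
+   * checks that write too. With all expected_*_fail flags above at 0, both
+   * writes are expected to stick.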
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..73283230 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
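+ * - As in the sibling tests, CSR 0x747 used below is mseccfg (Smepmp); RLB and
+ *   MMWP get set while MML stays 0 for this rlb1_mmwp1_mml0 configuration.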
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
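+   * (The writes below assume the usual PMP address encoding, i.e. pmpaddrN takes
+   * the address shifted right by 2; the TOR entries programmed via pmpaddr4..6
+   * cover the M-mode code and data regions noted in the inline comments.)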
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..bcf56469 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..d00889f2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..dee6b818 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..ea1a53ab --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..eec8a0d5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..8599ddaf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..d0d86997 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..9e5006b6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..3e937e00 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..f2ed468b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..2cd28450 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..3b542b52 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..c2033817 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..4045ba63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..f3dbdbd7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..9cca05e3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..6249270e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..244f9a5b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..8d84d074 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..ef831251 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (1 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..371d4203 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (1 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..0c8a3d54 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (1 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..9a6dfe4f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..9c3333c7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..d694a8b1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..40a06541 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..6ad709b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..5eea0b30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..995a931f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..88126494 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..aabb68d5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..3f547768 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..236d24bf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock01_rlb1_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((1 || 0) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (1 != 0) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (0) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (1 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 0 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..5a3ca605 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..e777536c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..07ae0457 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..38c4c94a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..1ce57af0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..c0bf34dd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..d18a5007 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..6f83ce17 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..cd9556af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..b334f85a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..2bf9c05d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..9ce76ed6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..5c432472 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..b4bbae89 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..e847bdbb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..946d571a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..7142085b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..8fd69745 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..53632366 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..73dd24b0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..ba742091 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..91f3d84c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..54a2875f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..42f3b403 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..3d5dd3af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
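+   * pmpaddrN holds the region address right-shifted by PMP_SHIFT (2), hence the >> 2 in the writes below.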
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..95b33bd8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
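+   * A read-back mismatch below sets actual_pmpaddr_fail / actual_pmpcfg_fail;
+   * checkTestResult() compares these against the expected_* constants.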
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..18fd50c5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
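+   * Entries 2 and 3 are additionally locked (PMP_L set via csrs on pmpcfg0) before mseccfg is written.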
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..b86e0556 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..6c09a772 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
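+ * - expected_pmpaddr_fail/expected_pmpcfg_fail are 1 here: the targeted entries are locked
+ *   with RLB clear, so the CSR writes are expected to be ignored.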
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..c6f429bc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
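+   * This variant targets pmpaddr13 and pmpcfg2, which set_cfg() leaves unlocked,
+   * so the writes below are expected to take effect (expected_*_fail == 0).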
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..46acb232 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
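+ * - expected_pmpaddr_fail/expected_pmpcfg_fail are 0 here: the written values are expected to read back exactly as written.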
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..21a0e727 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..be9f7ab0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..0e71d547 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..966bc68b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..b2b0ab55 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..7cd73cd9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..ec7b27b5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..adc194fb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..89834d6a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..5bad51a2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..742f2be8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..f248eed0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..aa67ff58 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..056623f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..52484eac --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..c3cc9b88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..bd325185 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..2da0f52d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..a0cc8e5d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..3ceba7bb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..11a92815 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..67a055c0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..d56db180 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
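+ * The block below is a write/read-back check: pmpaddr10 is rewritten with a
+ * value that is legal for both NAPOT and TOR, then one byte of pmpcfg2 is
+ * toggled; any mismatch on read-back is recorded in actual_pmpaddr_fail or
+ * actual_pmpcfg_fail and reported through the exit code.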
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..afa0a324 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
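+ * - checkTestResult() encodes failures in the exit code: bit 0 for mseccfg,
+ *   bit 1 for pmpaddr and bit 2 for pmpcfg mismatches; 0 means pass.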
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..6abfa044 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
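+ * In the test below, a pmpaddr value valid for both NAPOT and TOR is chosen:
+ * for entry index 0 it would be a NAPOT-style mask (((rval + 1) << 1) - 1);
+ * for any other index, pmpaddr12 here, it is the old value shifted left by
+ * one plus a constant 65536.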
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..eca54dc9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..2200f195 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..d96c524b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
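+ * - CSR 0x747 accessed in set_cfg() is mseccfg; MSECCFG_MML, MSECCFG_MMWP and
+ *   MSECCFG_RLB are its Smepmp lockdown, whitelist-policy and
+ *   rule-locking-bypass bits.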
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..e35753ee --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..1c8bb0c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..c7304b59 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..8fe50a57 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..9b97a80b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..59a02cd5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..1d91a947 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..059b0d86 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..b9d344cb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
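+ * While mseccfg.RLB is 0, writes to a locked pmpcfg entry and to its pmpaddr are
+ * ignored, so the read-back comparison below shows whether a write took effect.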
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..285b6a30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
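+ * pmpaddr registers hold the region address right-shifted by PMP_SHIFT (2),
+ * hence the ">> 2" applied to every boundary written below.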
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..58e24a91 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
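+ * The sequence below reads the current CSR value, writes a derived value,
+ * reads it back, and latches any mismatch into the actual_* flags that
+ * checkTestResult() later compares against the expected_* constants.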
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..c592b7b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
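+ * - mseccfg is accessed by its CSR number (0x747) rather than by name.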
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
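+ * With TOR, entry N matches addresses in [pmpaddr(N-1), pmpaddrN); entry 4 is
+ * left OFF and only supplies the base address for entry 5.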
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..ba4b0128 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
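+ * "#if 1" selects the pmp CSR write/read-back variant; the "#else" branch,
+ * which would exercise the mseccfg write instead, is compiled out here.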
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..845853cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
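+ * - The expected_*_fail constants encode the pass/fail oracle for this
+ *   lock/rlb/mml/mmwp combination.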
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 1; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
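+ * MSECCFG_RLB, set at the top of set_cfg(), keeps locked entries writable
+ * during this setup phase; the later mseccfg write leaves RLB at 0 so the
+ * locks are in force for the test target.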
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..189ba0f3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
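+ * This variant pokes pmpaddr12 and pmpcfg2, which were never locked during
+ * setup, so the writes below are expected to stick (expected_*_fail are 0).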
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..6f78b176 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
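+ * - Generated variants differ mainly in which pmp index is poked and in the
+ *   expected_* oracle values.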
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
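+ * sub_cfg gives TEST_MEM and U_MEM (entries 2 and 3) identical R/W/X TOR
+ * attributes, including the PMP_L lock bit.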
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..81295858 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
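+ * The block below reads the target pmpaddr CSR, writes a derived value and reads it
+ * back, then XOR-toggles one byte of pmpcfg2 the same way. The tested entries are not
+ * locked, so both write-backs are expected to take effect; any mismatch is recorded in
+ * actual_pmpaddr_fail / actual_pmpcfg_fail (the expected_* counterparts are 0 here).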
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..af1b86ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..f5605d06 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..43363e9d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..b14d755e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..2c48153b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..edb1c9b2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
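+ * In this variant the pmpaddr/pmpcfg poke is compiled out (#if 0) and the #else branch
+ * exercises mseccfg (CSR 0x747) directly: it writes a new RLB/MML/MMWP combination,
+ * reads it back, and compares against an expected value that keeps RLB clear (locked
+ * entries are present) and keeps MML/MMWP set from the earlier write; expected_seccfg_fail is 0.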
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..43bd421e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
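+ * - mseccfg is written by CSR number (0x747) below; per the Smepmp spec this is the
+ *   machine security configuration register (see the "set proc->state.mseccfg" step).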
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
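+ * Note: the pmpaddr CSRs written below take the physical address right-shifted by
+ * PMP_SHIFT (2), which is why each region bound is written as (addr >> 2).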
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..b6f4bb5f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..4625c365 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
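+ * - mseccfg is written by CSR number (0x747) below; per the Smepmp spec this is the
+ *   machine security configuration register (see the "set proc->state.mseccfg" step).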
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
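+ * Note: the pmpaddr CSRs written below take the physical address right-shifted by
+ * PMP_SHIFT (2), which is why each region bound is written as (addr >> 2).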
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..40d94bcb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..36443cc8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
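+ * - mseccfg is written by CSR number (0x747) below; per the Smepmp spec this is the
+ *   machine security configuration register (see the "set proc->state.mseccfg" step).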
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
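+ * Note: the pmpaddr CSRs written below take the physical address right-shifted by
+ * PMP_SHIFT (2), which is why each region bound is written as (addr >> 2).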
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..3d3b9ac9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb0_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 0 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c new file mode 100644 index 00000000..16458107 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
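+ * - mseccfg is written by CSR number (0x747) below; per the Smepmp spec this is the
+ *   machine security configuration register (see the "set proc->state.mseccfg" step).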
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
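+ * Note: the pmpaddr CSRs written below take the physical address right-shifted by
+ * PMP_SHIFT (2), which is why each region bound is written as (addr >> 2).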
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c new file mode 100644 index 00000000..2b4644e8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c new file mode 100644 index 00000000..7256bcae --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c new file mode 100644 index 00000000..869d8d73 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c new file mode 100644 index 00000000..e599dfb7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c new file mode 100644 index 00000000..4e76d7fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c new file mode 100644 index 00000000..c866fc15 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c new file mode 100644 index 00000000..a617d748 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c new file mode 100644 index 00000000..caa0b79c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
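+ * (With TOR this maps pmp2cfg to [TEST_MEM_START, TEST_MEM_END) and pmp3cfg to [TEST_MEM_END, U_MEM_END).)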
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c new file mode 100644 index 00000000..b04dd83f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c new file mode 100644 index 00000000..c6ba6450 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c new file mode 100644 index 00000000..68987d05 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c new file mode 100644 index 00000000..6cf6d876 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c new file mode 100644 index 00000000..5890323f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c new file mode 100644 index 00000000..b3176fb3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
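+ * - This sec_* variant exercises the mseccfg (0x747) write/read-back path rather than a pmp CSR write.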
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c new file mode 100644 index 00000000..0e6330b9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c new file mode 100644 index 00000000..b3a2db24 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c new file mode 100644 index 00000000..540a16d0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c new file mode 100644 index 00000000..2582c331 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c new file mode 100644 index 00000000..e2954af3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c new file mode 100644 index 00000000..7067dae9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c new file mode 100644 index 00000000..a8c7ee95 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c new file mode 100644 index 00000000..877e2ac7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c new file mode 100644 index 00000000..eac0826d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
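+ *
+ * The pmpaddr probe writes a modified value and reads it back: the generated
+ * NAPOT branch (((rval + 1) << 1) - 1) appends a one bit, which is still a
+ * valid NAPOT mask, while the TOR branch doubles the address, which is still
+ * a valid top-of-range. A read-back mismatch means the write was dropped
+ * (e.g. by a locked entry) and actual_pmpaddr_fail is recorded.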
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c new file mode 100644 index 00000000..feadbcc5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c new file mode 100644 index 00000000..426d572b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
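+ *
+ * The pmpcfg probe reads the whole pmpcfg0 register first and XORs a small
+ * constant into the byte of the entry under test, so only that entry's
+ * R/W/X/A bits are toggled while the other entries keep their values; a
+ * read-back mismatch sets actual_pmpcfg_fail.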
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c new file mode 100644 index 00000000..0b79dcca --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c new file mode 100644 index 00000000..cbbad4fc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
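+ *
+ * mseccfg is addressed by CSR number 0x747 (its Smepmp-assigned address),
+ * presumably because the assembler in use does not know it by name; the same
+ * encoding is used both for the RLB pre-set at the top of set_cfg() and for
+ * the RLB|MML value programmed just above.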
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c new file mode 100644 index 00000000..134ab064 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c new file mode 100644 index 00000000..a90342ff --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
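+ *
+ * For this configuration all expected_*_fail constants are generated as 0:
+ * RLB stays set, so even the locked entries remain writable. checkTestResult()
+ * folds any mismatch into a bitmask exit code (1 = seccfg, 2 = pmpaddr,
+ * 4 = pmpcfg) so the failing check can be identified from the exit status.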
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c new file mode 100644 index 00000000..a7d380aa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c new file mode 100644 index 00000000..6fd62593 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c new file mode 100644 index 00000000..70d89528 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c new file mode 100644 index 00000000..a27075f9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c new file mode 100644 index 00000000..94bb6514 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c new file mode 100644 index 00000000..485e0bbb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c new file mode 100644 index 00000000..10e94e47 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c new file mode 100644 index 00000000..2dada932 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c new file mode 100644 index 00000000..1a9d7813 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c new file mode 100644 index 00000000..0bbb5864 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c new file mode 100644 index 00000000..2b848f27 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c new file mode 100644 index 00000000..e3609e36 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c new file mode 100644 index 00000000..884fc155 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c new file mode 100644 index 00000000..c4b265cf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp0_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (0) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c new file mode 100644 index 00000000..65f38596 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c new file mode 100644 index 00000000..dc490e83 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c new file mode 100644 index 00000000..3cb091e1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c new file mode 100644 index 00000000..a22ddba4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c new file mode 100644 index 00000000..a6fb16f3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c new file mode 100644 index 00000000..cc8de44c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c new file mode 100644 index 00000000..dae9ad9d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c new file mode 100644 index 00000000..12a205af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c new file mode 100644 index 00000000..8dc34792 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c new file mode 100644 index 00000000..bfa666d6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c new file mode 100644 index 00000000..5c7e3b70 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr9 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (9 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr9, %1 \n" + "\tcsrr %0, pmpaddr9 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr9 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c new file mode 100644 index 00000000..feaaa5f0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c new file mode 100644 index 00000000..457e54b4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 1; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c new file mode 100644 index 00000000..f1f1abf8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c new file mode 100644 index 00000000..e5ac0d62 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c new file mode 100644 index 00000000..e9d47ae2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c new file mode 100644 index 00000000..b5d26d1d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c new file mode 100644 index 00000000..43cb37ac --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c new file mode 100644 index 00000000..b0509ae9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c new file mode 100644 index 00000000..b60a34d5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c new file mode 100644 index 00000000..668ef74d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c new file mode 100644 index 00000000..68808402 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml0_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (0) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr8 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (8 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr8, %1 \n" + "\tcsrr %0, pmpaddr8 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr8 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((0 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (0) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c new file mode 100644 index 00000000..724a844f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c new file mode 100644 index 00000000..c735a6bf --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c new file mode 100644 index 00000000..7a55c927 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c new file mode 100644 index 00000000..bf737438 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c new file mode 100644 index 00000000..081704c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c new file mode 100644 index 00000000..0cc2b268 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr2 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (2 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr2, %1 \n" + "\tcsrr %0, pmpaddr2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr2 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c new file mode 100644 index 00000000..90a557fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr3 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (3 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 0; + } + asm volatile ("csrw pmpaddr3, %1 \n" + "\tcsrr %0, pmpaddr3 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr3 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg0 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg0, %1 \n" + "\tcsrr %0, pmpcfg0 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c new file mode 100644 index 00000000..21cba2cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_11.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr10 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (10 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr10, %1 \n" + "\tcsrr %0, pmpaddr10 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr10 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(1 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c new file mode 100644 index 00000000..65729b6e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_12.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr11 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (11 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr11, %1 \n" + "\tcsrr %0, pmpaddr11 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr11 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(2 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c new file mode 100644 index 00000000..60289aa1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_13.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(3 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c new file mode 100644 index 00000000..cff8fd5e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_14.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr13 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (13 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr13, %1 \n" + "\tcsrr %0, pmpaddr13 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr13 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 0 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(4 | (0 ? PMP_L : 0)) << (0 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c new file mode 100644 index 00000000..c19310c3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_15.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr14 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (14 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr14, %1 \n" + "\tcsrr %0, pmpaddr14 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr14 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 2 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(5 | (0 ? PMP_L : 0)) << (2 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c new file mode 100644 index 00000000..7b738f46 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_16.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr15 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (15 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr15, %1 \n" + "\tcsrr %0, pmpaddr15 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr15 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 3 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(6 | (0 ? PMP_L : 0)) << (3 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c new file mode 100644 index 00000000..9fdbd482 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_pmp_17.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 1 + asm volatile ("csrr %0, pmpaddr7 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (7 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr7, %1 \n" + "\tcsrr %0, pmpaddr7 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr7 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c new file mode 100644 index 00000000..a63284d6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_00.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c new file mode 100644 index 00000000..4dc682a9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_01.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c new file mode 100644 index 00000000..c8911b88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_02.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c new file mode 100644 index 00000000..e4e53b44 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_03.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (0 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c new file mode 100644 index 00000000..6e95293d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_04.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c new file mode 100644 index 00000000..f62b917e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_05.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (0 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c new file mode 100644 index 00000000..c54a6c20 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_06.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. 
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. + */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (0) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? 
MSECCFG_RLB : 0) + | (0 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 0) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c new file mode 100644 index 00000000..f6b01623 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c @@ -0,0 +1,314 @@ + +/* + * outputs/test_pmp_csr_1_lock10_rlb1_mmwp1_mml1_sec_07.c + * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel. + * + * This test program is verify the pmp CSR access when seccfg introduced. + * It's expected to executed from M mode. + * + * Remarks: + * - CSR protection for non-M mode access is assumed and not coverred. + * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred. + * - Executed on RV64 only. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#if ((PMP_R | PMP_W | PMP_X) != 0x7) +#error unexpected +#endif + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_seccfg_fail = 0; +static unsigned actual_seccfg_fail = 0; + +static const unsigned long expected_pmpaddr_fail = 0; +static unsigned actual_pmpaddr_fail = 0; + +static const unsigned long expected_pmpcfg_fail = 0; +static unsigned actual_pmpcfg_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + + +__attribute ((noinline)) +void target_foo() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +__attribute ((section(".text_umode"), noinline)) +void target_foo_U() { + asm volatile ("nop"); +} + +__attribute ((section(".data_umode"), aligned(8))) +volatile unsigned char target_arr_U[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 // @set_rlb_at_start:int@ + /* + * set MSECCFG_RLB to avoid locked at start + */ + asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB)); + asm volatile ("nop"); +#endif + +//------------------------Set current status before the test target (CSR access) + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + if (1) { // need to set L bit for M mode code access +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((0 || 1) ? PMP_L : 0); + cfg0 |= sub_cfg << 24; // for U_MEM + cfg0 |= sub_cfg << 16; // for TEST_MEM + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + if (0 != 1) { + reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type + if (1) { + asm volatile ("csrs pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } else { + asm volatile ("csrc pmpcfg0, %0 \n" + : + : "r"(lock_bits) + : "memory"); + } + } + + // set proc->state.mseccfg + const unsigned seccfg_bits = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits)); + +//------------------------Test target + asm volatile ("nop"); + /* + * Need to separate pmp and seccfg access since pmplock_recorded status may be + * updated again when accessing pmpcfg. 
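+ * Rough sketch of the write/read-back check that follows, using the
+ * MSECCFG_* masks defined above:
+ *
+ *   csrw 0x747, wval                       // write mseccfg
+ *   csrr rval, 0x747                       // read it back
+ *   expected = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);
+ *   // per the Smepmp rules this test models: MML/MMWP are sticky once
+ *   // set, and RLB can only be raised while no PMP entry is locked
+ *   if (expected != rval) actual_seccfg_fail = 1;
+ *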
+ */ + reg_t wval = 0, rval; +#if 0 + asm volatile ("csrr %0, pmpaddr12 \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (12 == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + 65536; + } + asm volatile ("csrw pmpaddr12, %1 \n" + "\tcsrr %0, pmpaddr12 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr12 expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than 1 + asm volatile ("csrr %0, pmpcfg2 \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(7 | (0 ? PMP_L : 0)) << (1 * 8)); + asm volatile ("csrw pmpcfg2, %1 \n" + "\tcsrr %0, pmpcfg2 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (1) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (1 ? MSECCFG_RLB : 0) + | (1 ? MSECCFG_MML : 0) + | (1 ? MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((1 || 1 || 1) + && 1 == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (1) { + expected_val |= MSECCFG_MML; + } + if (1) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..40b3fd1e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. 
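+ *
+ * (The file-name suffix appears to encode the generator parameters for this
+ *  case: executing mode (u), requested R/W and X permissions, lock bit (l),
+ *  whether the PMP address range matches the test region (match), and the
+ *  mseccfg MMWP/MML settings; the expected_*_fail constants below are the
+ *  outcome derived for this combination.)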
+ */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* 
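+ * Background sketch for the helpers above and the pmpaddr writes below,
+ * assuming the standard PMP encoding: a pmpaddrN CSR holds a physical
+ * address shifted right by 2.  detect_pmp_granularity() writes all ones to
+ * pmpaddr0 and counts how many low bits read back as zero (G), giving a
+ * granularity of 2^(G+2) bytes; mismatch_addr_offset() then doubles that
+ * granule until it covers the requested offset.  A NAPOT entry for a
+ * power-of-two region is encoded as
+ *
+ *   pmpaddr = (base >> 2) | (size/8 - 1);
+ *
+ * so an all-ones pmpaddr with PMP_NAPOT covers all of memory, and
+ * (TEST_MEM_START >> 3) - 1 covers [0, TEST_MEM_START).
+ *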
+ * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. + */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? 
MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..f3e37719 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
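+/* With PRINTF_SUPPORTED undefined, printf() expands to nothing, so the
+ * diagnostic in handle_trap() is dropped and the result is reported only
+ * through the exit/tohost code. */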
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
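+ *
+ * (The @name:type@ tokens look like gengen template parameters from
+ *  test_pmp_ok_1.cc_skel; in this generated copy they survive only in the
+ *  comments, while the code below carries the substituted constants, e.g.
+ *  the literal mismatch_offset value and constant-folded conditionals such
+ *  as (1 ? MSECCFG_MML : 0).)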
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
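+ * When this routine runs in U mode, FAKE_ADDRESS (0x10000000) lies outside
+ * every PMP region configured above, so the store below faults with
+ * CAUSE_STORE_ACCESS; handle_trap() then sees mtval == FAKE_ADDRESS and
+ * finishes the test via checkTestResult().  In this configuration
+ * switch_mode() is a no-op, so the U-mode path is not actually exercised.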
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..77081a0e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
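+ * Recovery sketch for the faults expected in this configuration: a load or
+ * store access fault whose mtval falls inside TEST_MEM marks actual_rw_fail
+ * and resumes at epc + 4 (assuming the faulting access is a 4-byte
+ * instruction); a fault taken while fetching from TEST_MEM marks
+ * actual_x_fail and finishes the test.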
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..94d25a03 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..3d8a4fdc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..74c38d3d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..a4bffc5b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..1e188a5f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..e6a859ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..34c5570d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..78cf5823 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..73ab6e3b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..af2e88fd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
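+ * (Across the generated variants, the filename fields u*/rw*/x*/l*/
+ *  match*/mmwp*/mml* appear to drive the constant-folded choices below:
+ *  mismatch_offset, the PMP_L selections, the mseccfg MML/MMWP bits,
+ *  and the expected_rw_fail/expected_x_fail values.)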
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..c84cbfea --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..1068f701 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..f7ed3ac5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..f7030806 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..19f00239 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..8bd7f966 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..539b9c88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..9e28bda4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..30c97840 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..efa958c1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..d30c9b74 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..1117a163 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..2cf61dc4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..0d9e737a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..59b7e007 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..74955806 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..f8ba50be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..8dbfaa36 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..41cfd81c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw00_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..99202f86 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
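+ * FAKE_ADDRESS lies outside every region programmed in set_cfg(), so the store
+ * below should always trap; handle_trap() recognises mtval == FAKE_ADDRESS and
+ * finishes the test through checkTestResult().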
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..ccf25866 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
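+ *
+ * Note: on RV64 pmpcfg0 packs the configuration bytes for entries 0-7 (the
+ * odd-numbered pmpcfg registers do not exist), which is why the cfg1 byte
+ * group is merged in with "cfg0 |= (cfg1 << 32)" below instead of writing
+ * pmpcfg1 as on RV32.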
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..88fe4370 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
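+ *
+ * The 256-byte offset requested below is kept as-is, or widened to one full
+ * PMP grain when the implementation's grain is larger than 256 bytes, so the
+ * shifted pmp2 base stays grain-aligned. detect_pmp_granularity() finds the
+ * grain by writing all-ones to pmpaddr0 and counting the low-order bits that
+ * read back as zero.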
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..0e840c52 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
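+ *
+ * Reminder (Smepmp): with mseccfg.MML set, locked rules apply to M mode and
+ * unlocked rules to U mode only, and with mseccfg.MMWP set, M-mode accesses
+ * that match no PMP entry are denied; both bits are set further down for this
+ * test case.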
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..993628df --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
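+ *
+ * In this variant the offset below is zero, so the pmp2cfg TOR base sits
+ * exactly at TEST_MEM_START and the entry covers all of TEST_MEM (the
+ * "match1" configurations in the file name).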
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..cbc89f2d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
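+ * (Not reached in this u0 variant: switch_mode() keeps the hart in M mode, so
+ * try_access_umode() is effectively dead code here.)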
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..5d9df03d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
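+ * On an unexpected trap the test exits immediately; checkTestResult() folds
+ * the outcome into the exit code (bit 0: read/write mismatch, bit 1: execute
+ * mismatch, 0 means pass);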
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..bfa8b06a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
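+ * With TOR addressing, pmpaddr1 is the base for entry 2 (TEST_MEM) and entry
+ * 3 extends coverage up to U_MEM_END; the pmpaddr4..6 writes below add the
+ * separate M-mode code/data entries used when M_MODE_RWX is 0.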
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..2087b6a1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
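+ * In this configuration both expected_rw_fail and expected_x_fail are 0, so
+ * any TEST_MEM fault reaching this handler is a mismatch that
+ * checkTestResult() will report as a nonzero exit code.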
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..93b311c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
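+ * In this variant pmp_addr_offset is non-zero: pmpaddr1 is raised above
+ * TEST_MEM_START by at least one PMP granule, so the first mismatch_offset
+ * bytes of TEST_MEM fall outside entry 2 (the address-mismatch case this
+ * file exercises).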
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..01795ad6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..59627866 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..737a582a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
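+ * In this variant M_MODE_RWX is 1, so instead of the separate TOR code/data
+ * entries a single NAPOT entry at pmpaddr0 grants R/W/X over the whole
+ * region below TEST_MEM_START.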
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..f4e02fcd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..b6cebfc0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..a4a23d96 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..185506cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..e7780167 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..291c05ab --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..9c53d0fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..b64debb9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..6fa48efe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..daa81820 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..37f20ca9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..ed8193bd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..3de88ead --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..9654129a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..9810c431 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
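+ * A non-zero @pmp_addr_offset@ raises the TOR base written to pmpaddr1 above TEST_MEM_START, so the first @pmp_addr_offset@ bytes of TEST_MEM match no PMP entry.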
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..97ca1888 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
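+ * handle_trap(): an execute fault inside TEST_MEM sets actual_x_fail; a load/store access fault there sets actual_rw_fail and resumes at epc + 4.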
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
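+ * (in this test variant switch_mode() is a no-op -- switch_to_U() is compiled out -- so try_access_umode() is never actually reached)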
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..26568415 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
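+ * With M_MODE_RWX, pmpaddr0 = (TEST_MEM_START >> 3) - 1 NAPOT-encodes the range [0, TEST_MEM_START), covering the M_MEM region below TEST_MEM with a single RWX entry.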
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..8c7c21e3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
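+ * cfg0 packs pmp0cfg..pmp3cfg, one byte per entry; on RV64 pmpcfg0 also holds entries 4..7, so cfg1 is merged into its upper 32 bits instead of writing pmpcfg1 separately.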
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..8c393805 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw10_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
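+ * (CSR 0x747 is mseccfg; the Smepmp RLB bit set at the top of set_cfg() keeps locked PMP entries editable while the configuration is rewritten.)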
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..482f0ba1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
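+ * Unexpected traps (epc outside TEST_MEM and mtval not FAKE_ADDRESS) end the run via tohost_exit(1337).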
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
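+ * (checkTestResult() exits with bit 0 set on an R/W mismatch and bit 1 set on an X mismatch; 0 means pass)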
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..be2e6553 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..00225439 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..03c19fad --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..dd1705be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..5be1d78b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..b41e9f75 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..3d0ebdd6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..24751757 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
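+ * (handle_trap() recognizes mtval == FAKE_ADDRESS and ends the test via
+ * checkTestResult() instead of resuming here.)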
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..85c6b2cc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
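+ * Here the offset stays 0 and no pmp2cfg rule is created for TEST_MEM; with
+ * MML set below, execution from the unmatched TEST_MEM region is expected to
+ * fault (expected_x_fail = 1) while loads and stores still succeed.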
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..80bd27fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
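+ * Here the offset is non-zero (256, adjusted to the detected PMP granularity),
+ * so the TOR base in pmpaddr1 sits above TEST_MEM_START; with MMWP set below,
+ * the unmatched M-mode accesses are expected to fault (both expected_rw_fail
+ * and expected_x_fail are 1).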
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..554530a4 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
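+ * Both MML and MMWP are set below and no pmp2cfg rule is created for
+ * TEST_MEM, so every M-mode access to it is unmatched and expected to fault
+ * (expected_rw_fail and expected_x_fail are both 1).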
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..4e6698fd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
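+ * Here a locked TOR rule with R+W but no X is created for TEST_MEM, so
+ * M-mode loads and stores succeed while the instruction fetch is expected to
+ * fault (expected_x_fail = 1).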
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..6b1d1bfc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
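+ * With MML set below, the locked R+W (no X) rule for TEST_MEM still permits
+ * M-mode loads and stores but not execution, matching expected_x_fail = 1.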
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..851663dd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..7494a8da --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..ddd39b55 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..2930f6fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..c7fff758 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..baa00691 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..8f8828b5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..79e4e01d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..98b14231 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
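+ * The handler below replaces the default one from syscalls.c: any trap
+ * taken with epc inside TEST_MEM is recorded as an execute failure, a
+ * load/store access fault with mtval inside TEST_MEM is recorded as a
+ * read/write failure and execution resumes past the faulting access
+ * (epc + 4, assuming an uncompressed instruction), and a store fault at
+ * FAKE_ADDRESS is the deliberate exit path used by try_access_umode().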
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
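+ * (CSR 0x747 used in this function is mseccfg; it is addressed by
+ * number, presumably because the assembler in use does not know the
+ * Smepmp CSR name. RLB is set on entry so that PMP entries locked by a
+ * previous configuration can still be rewritten, and MML/MMWP are only
+ * set at the end, once the entries themselves are in place.)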
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
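+ * FAKE_ADDRESS lies outside every PMP region configured above, so the
+ * store below is guaranteed to fault when executed from U mode;
+ * handle_trap() recognizes the address and finishes the test, which is
+ * how control returns to M mode without needing an ecall.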
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..3aedc68e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..0e65fc1f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..d3e2eb9e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..a17239c8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..5f46fe74 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..6120591b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..ab910a33 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..ac24185d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..e365813c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u0_rw11_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 0 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..7b7c3253 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..411316b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..d4cd1cf5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..ca14b9d7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
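+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.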
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
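+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)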
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..5bbdb0d9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
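+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.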
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
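+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)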
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..83107ab8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
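+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.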
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
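+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)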
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..6009bfa6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
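+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.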
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
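+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)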
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..3f2e92fe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
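+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.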
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
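+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)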
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..ebfa32bc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
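+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.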
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
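+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)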
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..fcfdb7c3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
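+ * Note: pmpaddr CSRs hold address bits [XLEN-1:2], hence the >> 2 when the TOR boundaries are written below.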
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
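+ * (handle_trap() matches mtval against FAKE_ADDRESS on the resulting access fault and ends the test via checkTestResult().)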
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..8971b659 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
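+ * The value returned by handle_trap is used as the PC to resume at, so
+ * "return epc + 4" skips the faulting load/store on the assumption that
+ * it is a 4-byte (uncompressed) instruction. A sketch of a variant that
+ * also tolerates compressed encodings (illustration only, not part of
+ * the generated skeleton) could derive the step size from the opcode:
+ *
+ *   unsigned short insn = *(unsigned short *)epc;
+ *   return epc + (((insn & 0x3) == 0x3) ? 4 : 2);  // 32-bit vs. 16-bit
+ *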
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
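+ *
+ * detect_pmp_granularity() follows the probe from the privileged spec:
+ * write all ones to pmpaddr0, read it back, and if G is the index of
+ * the lowest set bit the granularity is 2^(G+2) bytes. Worked example
+ * (illustrative values only):
+ *
+ *   read-back = 0x...fffffffc  ->  G = 2  ->  granule = 1 << (2+2) = 16
+ *   mismatch_addr_offset(16): 16 -> 32 -> 64 -> 128 -> 256, returns 256
+ *
+ * so the mismatch offset below ends up a granule-aligned power of two
+ * that is at least the 256 bytes requested by the generator.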
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..b201b349 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
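+ *
+ * Note on the CSR writes below: 0x747 is the CSR number of mseccfg (the
+ * Smepmp extension); the raw number is written, presumably so the code
+ * assembles even with toolchains that do not know the CSR by name.
+ * Broadly, per Smepmp (summarised here rather than taken from this file):
+ *   MSECCFG_RLB  (bit 2) - rule-locking bypass: locked entries may still
+ *                          be rewritten while it is set
+ *   MSECCFG_MMWP (bit 1) - M-mode accesses that match no PMP entry are
+ *                          denied instead of allowed
+ *   MSECCFG_MML  (bit 0) - machine-mode lockdown: tightens how R/W/X/L
+ *                          apply to M mode and to shared M/U regions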
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..8ce88f1e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
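+ *
+ * Layout reminder for the cfg words built below (standard pmpcfg
+ * packing, not specific to this test): each pmpNcfg is one byte, and
+ * entry i occupies bits 8*i+7 down to 8*i of its cfg register. So
+ * "<< 24" selects entry 3 (the U_MEM TOR rule), and on RV64
+ * "cfg0 |= (cfg1 << 32)" places entries 4..7 in the upper bytes of
+ * pmpcfg0, because odd-numbered pmpcfg registers do not exist there;
+ * on RV32 those entries are written through pmpcfg1 instead. A
+ * hypothetical helper (illustration only) would be:
+ *
+ *   #define PMPCFG_ENTRY(bits, i)  ((reg_t)(bits) << (8 * (i)))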
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..03c823b7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
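+ *
+ * For reference, the other half of the round trip is switch_to_U()
+ * above: it clears the MPP field of mstatus (csrc with MSTATUS_MPP =
+ * 0x1800, so MPP becomes 00 = U), points mepc at try_access_umode,
+ * loads sp with U_MEM_END, and executes mret, which drops to U mode at
+ * this function. Note that the inline asm loads MSTATUS_MPP into
+ * operand %0 but clears mstatus via t0, so it appears to rely on the
+ * compiler allocating t0 for that operand.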
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..09cab838 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
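+ *
+ * In this variant M_MODE_RWX is 1, so entry 0 below uses NAPOT rather
+ * than TOR. Assuming the usual NAPOT encoding (pmpaddr = base/4 +
+ * size/8 - 1 for a naturally aligned power-of-two region):
+ *
+ *   (TEST_MEM_START >> 3) - 1 = 0x200000/8 - 1 = 0x3ffff
+ *
+ * which encodes a 2 MiB region starting at physical address 0, i.e. a
+ * single R/W/X rule for everything below TEST_MEM_START.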
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..8277548a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
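+ *
+ * checkTestResult() below folds the outcome into the exit code:
+ *   exit(0) - both the R/W and the fetch results matched expectations
+ *   exit(1) - actual_rw_fail differed from expected_rw_fail
+ *   exit(2) - actual_x_fail differed from expected_x_fail
+ *   exit(3) - both differed
+ * while tohost_exit(1337) in handle_trap flags a trap the test did not
+ * anticipate.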
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..d325a8c6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
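+ *
+ * About the L bit toggled in the "need to set L bit" block below:
+ * PMP_L locks an entry, meaning (per the privileged spec) the rule can
+ * no longer be rewritten until reset and, when MML is clear, it is
+ * also enforced for M-mode accesses. Because set_cfg() sets
+ * MSECCFG_RLB first, this test can still rewrite locked entries while
+ * it is configuring them.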
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..c33238f5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..c609d1be --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..c816efbd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..8464a098 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..8477da4d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..5b63fcd8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..9a591e80 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..8a4c8c5a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..31e57fbc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..2d06aecc --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..d9376f43 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..d7ccc12e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..0cb6a30c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..90a91e6d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..ae51b319 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw00_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (0 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..282e459e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..cba1b922 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..6882bbf1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..0f18a9c9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..729cd357 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
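+ *
+ * FAKE_ADDRESS matches no PMP entry, so this store is expected to trap;
+ * handle_trap() recognises mtval == FAKE_ADDRESS and calls
+ * checkTestResult() from M mode, so the U-mode path finishes the test
+ * without an ecall.  checkTestResult() encodes the verdict in the exit
+ * code:
+ *   bit 0 - observed load/store behaviour differed from expected_rw_fail
+ *   bit 1 - observed fetch behaviour differed from expected_x_fail
+ *   0     - everything matched, i.e. the test passed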
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..65bc8335 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
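+ *
+ * Background on the mseccfg (CSR 0x747) writes in this function, per the
+ * Smepmp/ePMP proposal these tests target:
+ * - RLB is set first so that entries left locked by a previous
+ *   configuration can still be rewritten while setting up the test.
+ * - This variant sets MML, under which M mode may only access regions
+ *   covered by entries with the L bit set (the shared-region encodings are
+ *   skipped by the generator), hence the "set L bit for M mode code"
+ *   branch below is taken.
+ * - MML and MMWP are sticky: once written to 1 they stay set until reset.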
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..8741c283 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
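+ *
+ * This variant enables MMWP (mseccfg.MMWP) at the end of this function.
+ * Per the Smepmp proposal, MMWP turns the default for M-mode accesses that
+ * match no PMP entry from "allow" into "deny", and the bit is sticky once
+ * set.  This file also takes the M_MODE_RWX path below, programming pmp0
+ * as an R|W|X NAPOT entry over [0, TEST_MEM_START), so that region stays
+ * accessible to M mode under the whitelist policy.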
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..e05bfe67 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..6894def9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
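+ *
+ * In this "match0" variant no PMP entry is ever made to match TEST_MEM:
+ * mismatch_offset stays 0 and the "#if 0" block below leaves entry 2 (the
+ * TEST_MEM slot in cfg0) switched OFF.  Running from U mode, where
+ * unmatched accesses are denied, both the data access and the fetch into
+ * TEST_MEM are therefore expected to fault (expected_rw_fail and
+ * expected_x_fail are both 1 at the top of this file).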
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..cfd73713 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
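+ *
+ * This variant creates the mismatch by address rather than by leaving the
+ * entry OFF.  detect_pmp_granularity() writes all-ones to pmpaddr0, reads
+ * it back and counts the low-order zero bits; the granule is
+ * 2^(2 + trailing_zeros) bytes (4 bytes if no low bits are tied to zero).
+ * mismatch_addr_offset() then starts from one granule and doubles it until
+ * it reaches the requested 256-byte offset, so the offset folded into
+ * pmpaddr1 below is granule-aligned and entry 2's TOR window no longer
+ * covers the start of TEST_MEM.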
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..117b1ff0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..4ac00d2c --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..5ef8ce29 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..44585be0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..f8f46f88 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..e97bd91f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..e5393dee --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..81041eba --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..fd2447af --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
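+ * handle_trap() marks the execute failure (and ends the test) when the
+ * faulting epc lies inside TEST_MEM, marks the read/write failure and
+ * resumes at epc + 4 when a load/store access fault points into TEST_MEM,
+ * and treats a fault on FAKE_ADDRESS as the deliberate way back from U mode.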
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
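+ *
+ * For the address-mismatch case the requested offset (256 in this test) is
+ * raised to the detected PMP granularity when the granularity is larger,
+ * then added to TEST_MEM_START before being written to pmpaddr1, so the
+ * start of TEST_MEM deliberately falls outside the TOR region.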
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..b5272cf6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..85aa3fe8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..f8fb3c68 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..cd12b79d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..4f89f802 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..3fdda498 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..475246bb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..dee02396 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..8de615d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..a596e329 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..d5c24006 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..b9b4f3c1 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..7aba47e2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw10_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (0 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..904fc44d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..eb2c2fcd --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..ae5fbd5b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..0fa4d95a --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..034c0637 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..218a65d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..7b4228a0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..9595d009 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..7351d127 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..fa45c2c7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..186e3174 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..d1d4cf2f --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..94de37d3 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..8a05d92d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..ed7ec3fa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..a29ba3ee --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x0_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (0 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c new file mode 100644 index 00000000..79509326 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c new file mode 100644 index 00000000..c377e8e7 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c new file mode 100644 index 00000000..27367967 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c new file mode 100644 index 00000000..1b4b7c78 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c new file mode 100644 index 00000000..e8b6d8f9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c new file mode 100644 index 00000000..80dadf3d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c new file mode 100644 index 00000000..5874dd65 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c new file mode 100644 index 00000000..d367d8b6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l0_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (0 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c new file mode 100644 index 00000000..cfa481f9 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c new file mode 100644 index 00000000..214cd9c2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c new file mode 100644 index 00000000..bace2cf2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
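+ * (Mechanism note: a non-zero mismatch offset is rounded up to a whole number
+ * of PMP granules and added to pmpaddr1, so the TOR region intended for
+ * TEST_MEM no longer starts at TEST_MEM_START and the accesses to target_arr
+ * fall outside it; this is what the *_match0_* variants exercise.)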
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c new file mode 100644 index 00000000..070f6c66 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match0_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 256; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
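+ * (CSR 0x747, written by set_cfg() below, is mseccfg; it is addressed by
+ * number, presumably because the symbolic name is not known to all
+ * assembler versions.)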
+ */ + + unsigned int mismatch_offset = 256; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 0 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c new file mode 100644 index 00000000..4d93c217 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 1 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
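+ * (TOR entries match addresses in [pmpaddr(i-1) << 2, pmpaddr(i) << 2), so
+ * pmpaddr1 only supplies the lower bound for entry 2 below; entry 1's own
+ * cfg field is left OFF.)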
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c new file mode 100644 index 00000000..4499f000 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp0_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
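+ * (detect_pmp_granularity() above uses the usual probe of writing all-ones
+ * to pmpaddr0 and reading it back: if the low G bits come back as zero, the
+ * PMP granularity is 2^(G+2) bytes, which is what the 1UL << g computation
+ * returns.)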
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (0 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c new file mode 100644 index 00000000..96794703 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 0; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
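+ * (MSECCFG_MML is the Smepmp Machine-Mode Lockdown bit and MSECCFG_MMWP the
+ * Machine-Mode Whitelist Policy bit; both are sticky once set, which is why
+ * the tests only ever turn them on with csrs.)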
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (0) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (0 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c new file mode 100644 index 00000000..c30abb20 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c @@ -0,0 +1,345 @@ + +/* + * outputs/test_pmp_ok_1_u1_rw11_x1_l1_match1_mmwp1_mml1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = 1; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if 1 + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = 0; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @pmp_addr_offset:int@ is to create an address mismatch + * And @create_pmp_cfg:int@ is to create cfg mismatch. 
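+ * (MSECCFG_RLB, Rule Locking Bypass, is set first so that entries carrying
+ * PMP_L can still be reprogrammed while set_cfg() builds the configuration.)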
+ */ + + unsigned int mismatch_offset = 0; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (1) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if 1 + cfg0 |= ( (1 ? PMP_R : 0) + | (1 ? PMP_W : 0) + | (1 ? PMP_X : 0) + | PMP_TOR | (1 ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (1 ? MSECCFG_MML : 0) | (1 ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c new file mode 100644 index 00000000..4ef61a78 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c new file mode 100644 index 00000000..18c29089 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c new file mode 100644 index 00000000..764ae510 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..67fcd493 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c new file mode 100644 index 00000000..9d5a1bb8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..347cb0ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c new file mode 100644 index 00000000..0fd54c69 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..715c485d --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x0_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c new file mode 100644 index 00000000..b2c8bcba --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c new file mode 100644 index 00000000..4d15a8b6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
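+/* printf() deliberately expands to nothing here, so the bare-metal test
+   builds and runs without any console support. */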
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
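+ *
+ * Note: pmpaddrN holds physical-address bits [..:2], so byte addresses are
+ * written shifted right by PMP_SHIFT (2). With A=TOR an entry i covers
+ * [pmpaddr(i-1) << 2, pmpaddr(i) << 2), so pmpaddr1 = TEST_MEM_START >> 2 and
+ * pmpaddr2 = TEST_MEM_END >> 2 make entry 2 cover exactly
+ * [TEST_MEM_START, TEST_MEM_END).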
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
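+ * FAKE_ADDRESS is matched by no PMP entry, so this store is guaranteed to
+ * raise a store access fault from U-mode; the M-mode handler recognizes
+ * mtval == FAKE_ADDRESS and calls checkTestResult(), because the U-mode code
+ * has no direct path to the tohost reporting machinery once MML is set.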
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c new file mode 100644 index 00000000..8c12a5a2 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
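+ * Note: the 'return epc + 4' path assumes the faulting load/store was issued
+ * by a 4-byte (non-compressed) instruction.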
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..dd0337f5 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
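+ *
+ * Note: the 'csrs 0x747' writes in this function target the mseccfg CSR.
+ * RLB (Rule Locking Bypass), set at the top, allows locked entries to be
+ * rewritten while the configuration is built up; MML/MMWP (Machine Mode
+ * Lockdown / Machine Mode Whitelist Policy) are enabled once the entries are
+ * in place.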
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c new file mode 100644 index 00000000..58d495ce --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
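+ * Note: a trap matching none of the cases below falls through to
+ * tohost_exit(1337), so unexpected faults surface as a distinctive exit code.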
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..be5e02e0 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
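+ *
+ * Note: with MMWP set, M-mode accesses that match no PMP entry are denied,
+ * so the extra TOR entries programmed via pmpaddr4..pmpaddr6 and cfg1 are
+ * meant to cover the harness's own code and data (see the '// for code' and
+ * '// for data' remarks below); their L bits are set beforehand so they stay
+ * M-mode rules once MML is turned on.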
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c new file mode 100644 index 00000000..02a96b0b --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode0.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 0 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..62457c95 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r0_x1_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
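+ *
+ * Note: each pmpcfgN packs one byte per entry (R/W/X in bits 0-2, A in bits
+ * 3-4, L in bit 7), which is why the TEST_MEM entry lands at a 16-bit shift
+ * (entry 2) and the U_MEM entry at a 24-bit shift (entry 3). RV64 has no
+ * pmpcfg1, so cfg1 is folded into the upper 32 bits of pmpcfg0 instead.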
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((0 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c new file mode 100644 index 00000000..5af37e1e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..a6dacffb --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..58ce4407 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..29b59eaa --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x0_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (0 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c new file mode 100644 index 00000000..cc2e9709 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 1; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c new file mode 100644 index 00000000..94179906 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl0_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (0 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c new file mode 100644 index 00000000..5b2fd2d8 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex0_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 0) +#define TEST_FETCH (0) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 1; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 0; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if 1 + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c new file mode 100644 index 00000000..e3849239 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c @@ -0,0 +1,296 @@ + +/* + * outputs/test_pmp_ok_share_1_r1_x1_cfgl1_typex1_umode1.c + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program is to test pmp_ok() when share mode (RW=01). + * Based on other test cases for mseccfg stiky bits, this test expects following: + * - RW = 01. For RW != 01, less combinations to show it fail. + * - MML set + * - Regine matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - 1) +#define TEST_FETCH (1) +/* + * Whether rwx share single cfg for M mode + * When @set_sec_mml@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX 0 + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) 
+#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = 0; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = 0; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = 1; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. + * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). 
+ */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((1 ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (1 ? PMP_X : 0) + | (1 ? PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
+ */
+    volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);
+    *p = 1;
+}
+
+static void checkTestResult() {
+    int ret = 0;
+    if (expected_r_fail != actual_r_fail) {
+        ret += 1;
+    }
+    if (expected_w_fail != actual_w_fail) {
+        ret += 2;
+    }
+    if (expected_x_fail != actual_x_fail) {
+        ret += 4;
+    }
+
+
+    exit(ret);
+}
+
+int main() {
+    // assert in M mode
+    set_cfg();
+
+    try_access();
+#if 1
+    switch_mode_access();   // access in umode and report final result
+#else
+    checkTestResult();
+#endif
+    return 0; // assert 0
+}
+
diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel
new file mode 100644
index 00000000..c840d138
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.cc_skel
@@ -0,0 +1,313 @@
+
+/*
+ * @tag@
+ * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel.
+ *
+ * This test program verifies the pmp CSR access when seccfg is introduced.
+ * It is expected to be executed from M mode.
+ *
+ * Remarks:
+ *   - CSR protection for non-M mode access is assumed and not covered.
+ *   - Access to an invalid CSR index, like pmpcfg1 for rv64, is not covered.
+ *   - Executed on RV64 only.
+ */
+
+/*
+ * Macros from encoding.h
+ */
+#define MSTATUS_MPP 0x00001800
+
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_L 0x80
+#define PMP_SHIFT 2
+
+#if ((PMP_R | PMP_W | PMP_X) != 0x7)
+#error unexpected
+#endif
+
+#define PMP_OFF 0x0
+#define PMP_TOR 0x08
+#define PMP_NA4 0x10
+#define PMP_NAPOT 0x18
+
+#define MSECCFG_MML 0x1
+#define MSECCFG_MMWP 0x2
+#define MSECCFG_RLB 0x4
+
+#define TEST_RW 1
+#define TEST_FETCH 1
+/*
+ * Whether rwx share single cfg for M mode
+ * When @@set_sec_mml@@ set, it must be 0, otherwise unexpected exception
+ */
+#define M_MODE_RWX @m_mode_rwx:int@
+
+#define CAUSE_LOAD_ACCESS 0x5
+#define CAUSE_STORE_ACCESS 0x7
+
+typedef unsigned long reg_t;
+typedef unsigned long uintptr_t;
+
+/*
+ * functions from syscalls.c
+ */
+#if PRINTF_SUPPORTED
+int printf(const char* fmt, ...);
+#else
+#define printf(...)
+#endif
+
+void __attribute__((noreturn)) tohost_exit(uintptr_t code);
+void exit(int code);
+
+/*
+ * local status
+ */
+#define TEST_MEM_START 0x200000
+#define TEST_MEM_END 0x240000
+#define U_MEM_END (TEST_MEM_END + 0x10000)
+#define FAKE_ADDRESS 0x10000000
+
+static const unsigned long expected_seccfg_fail = @expected_seccfg_fail:int@;
+static unsigned actual_seccfg_fail = 0;
+
+static const unsigned long expected_pmpaddr_fail = @expected_pmpaddr_fail:int@;
+static unsigned actual_pmpaddr_fail = 0;
+
+static const unsigned long expected_pmpcfg_fail = @expected_pmpcfg_fail:int@;
+static unsigned actual_pmpcfg_fail = 0;
+
+static void checkTestResult(void);
+
+/*
+ * override syscalls.c.
+ * a trap is not expected here; simply abort the test
+ */
+uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])
+{
+    tohost_exit(1337);
+}
+
+
+__attribute ((noinline))
+void target_foo() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_test_arr"), aligned(8)))
+volatile unsigned char target_arr[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+__attribute ((section(".text_umode"), noinline))
+void target_foo_U() {
+    asm volatile ("nop");
+}
+
+__attribute ((section(".data_umode"), aligned(8)))
+volatile unsigned char target_arr_U[100] = {
+    1,2,3,4,5,6,7,8,
+};
+
+/*
+ * On processor_t::reset():
+ *  - set_csr(CSR_PMPADDR0, ~reg_t(0));
+ *    set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+ */
+static void set_cfg() {
+#if 1 // @@set_rlb_at_start:int@@
+    /*
+     * set MSECCFG_RLB to avoid being locked at start
+     */
+    asm volatile ("csrs 0x747, %0 \n"::"r"(MSECCFG_RLB));
+    asm volatile ("nop");
+#endif
+
+//------------------------Set current status before the test target (CSR access)
+    /*
+     * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR.
+     * Then use pmp2cfg for TEST_MEM. Both test code and data share one PMP entry.
+     * Also use pmp3cfg for fixed U mode (U_MEM).
+     */
+    asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory");
+    asm volatile ("csrw pmpaddr1, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory");
+
+#if M_MODE_RWX
+    asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory");
+    reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);
+#else
+    asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data
+    asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory");     // for code
+    asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory");     // addr start
+    reg_t cfg0 = PMP_OFF;
+    reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);
+#endif
+
+    if (@pre_sec_mml:int@) { // need to set L bit for M mode code access
+#if M_MODE_RWX
+        cfg0 |= PMP_L;
+#else
+        cfg1 |= ((PMP_L << 8) | (PMP_L << 16));
+#endif
+    }
+
+    reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | ((@lock_once:int@ || @pmp_lock:int@) ? PMP_L : 0);
+    cfg0 |= sub_cfg << 24; // for U_MEM
+    cfg0 |= sub_cfg << 16; // for TEST_MEM
+
+#if !M_MODE_RWX
+#if __riscv_xlen == 64
+    cfg0 |= (cfg1 << 32);
+#else
+    asm volatile ("csrw pmpcfg1, %0 \n"
+                :
+                : "r"(cfg1)
+                : "memory");
+#endif // __riscv_xlen == 64
+#endif // !M_MODE_RWX
+
+    asm volatile ("csrw pmpcfg0, %0 \n"
+                :
+                : "r"(cfg0)
+                : "memory");
+
+    if (@lock_once:int@ != @pmp_lock:int@) {
+        reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid using the (default) int type
+        if (@pmp_lock:int@) {
+            asm volatile ("csrs pmpcfg0, %0 \n"
+                        :
+                        : "r"(lock_bits)
+                        : "memory");
+        } else {
+            asm volatile ("csrc pmpcfg0, %0 \n"
+                        :
+                        : "r"(lock_bits)
+                        : "memory");
+        }
+    }
+
+    // set proc->state.mseccfg
+    const unsigned seccfg_bits = (@lock_bypass:int@ ? MSECCFG_RLB : 0)
+            | (@pre_sec_mml:int@ ? MSECCFG_MML : 0)
+            | (@pre_sec_mmwp:int@ ? MSECCFG_MMWP : 0);
+    asm volatile ("csrw 0x747, %0 \n"::"r"(seccfg_bits));
+
+//------------------------Test target
+    asm volatile ("nop");
+    /*
+     * Need to separate pmp and seccfg accesses since the pmplock_recorded status
+     * may be updated again when accessing pmpcfg.
+ */ + reg_t wval = 0, rval; +#if @group_pmp:int@ + asm volatile ("csrr %0, pmpaddr@addr_idx:int@ \n" + : "=r"(rval)); + // give a valid value for both NAPOT and TOR + if (@addr_idx:int@ == 0) { + wval = ((rval + 1) << 1) - 1; // NAPOT mask + } else { + wval = (rval << 1) + @addr_offset:int@; + } + asm volatile ("csrw pmpaddr@addr_idx:int@, %1 \n" + "\tcsrr %0, pmpaddr@addr_idx:int@ \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpaddr@addr_idx:int@ expects %lx vs %lx\n", wval, rval); + actual_pmpaddr_fail = 1; + } + + // Update cfg0 to avoid changing idx other than @cfg_sub_idx:int@ + asm volatile ("csrr %0, pmpcfg@cfg_idx:int@ \n" + : "=r"(cfg0) + : + : "memory"); + + // reuse lock_once here since it's for RLB and independent with pmp_lock + wval = cfg0 ^ ((reg_t)(@revert_rwx:int@ | (@lock_once:int@ ? PMP_L : 0)) << (@cfg_sub_idx:int@ * 8)); + asm volatile ("csrw pmpcfg@cfg_idx:int@, %1 \n" + "\tcsrr %0, pmpcfg@cfg_idx:int@ \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + if (wval != rval) { + printf("pmpcfg expects %lx vs %lx\n", wval, rval); + actual_pmpcfg_fail = 1; + } +#else + /* + * need to set PMP_L for cfg0 otherwise next PC will illegal + * This is a little coverage hole for non-PMP_L + mml, which should be + * a restricted use case and can be accepted anyway. + */ + if (@sec_mml:int@) { +#if M_MODE_RWX + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(PMP_L)); +#else +#if __riscv_xlen == 64 + asm volatile ("csrs pmpcfg0, %0 \n"::"r"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48))); +#else + asm volatile ("csrs pmpcfg1, %0 \n"::"r"((PMP_L << 8) | (PMP_L << 16))); +#endif // __riscv_xlen == 64 +#endif // M_MODE_RWX + } + + wval = (@sec_rlb:int@ ? MSECCFG_RLB : 0) + | (@sec_mml:int@ ? MSECCFG_MML : 0) + | (@sec_mmwp:int@ ? 
MSECCFG_MMWP : 0); + asm volatile ("csrw 0x747, %1 \n" + "\tcsrr %0, 0x747 \n" + : "=r"(rval) + : "r"(wval) + : "memory"); + reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP); + /* + * pre_sec_mml means cfg0 locked + * pmp_lock means cfg2/3 locked + * sec_mml is the test coverage hole just mentioned + */ + if ((@pre_sec_mml:int@ || @pmp_lock:int@ || @sec_mml:int@) + && @lock_bypass:int@ == 0) { + expected_val &= ~MSECCFG_RLB; + } + if (@pre_sec_mml:int@) { + expected_val |= MSECCFG_MML; + } + if (@pre_sec_mmwp:int@) { + expected_val |= MSECCFG_MMWP; + } + + if (expected_val != rval) actual_seccfg_fail = 1; +#endif +} + +static void checkTestResult() { + int ret = 0; + if (expected_seccfg_fail != actual_seccfg_fail) { + ret += 1; + } + + if (expected_pmpaddr_fail != actual_pmpaddr_fail) { + ret += 2; + } + + if (expected_pmpcfg_fail != actual_pmpcfg_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + checkTestResult(); + return 0; // assert 0 +} diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h new file mode 100644 index 00000000..4765af72 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_csr_1.h @@ -0,0 +1,1170 @@ +/* + * File automatically generated by + * gengen 1.4.2 by Lorenzo Bettini + * http://www.gnu.org/software/gengen + */ + +#ifndef PMP_CSR_1_GEN_CLASS_H +#define PMP_CSR_1_GEN_CLASS_H + +#include +#include + +using std::string; +using std::ostream; + +class pmp_csr_1_gen_class +{ + protected: + int addr_idx; + int addr_offset; + int cfg_idx; + int cfg_sub_idx; + int expected_pmpaddr_fail; + int expected_pmpcfg_fail; + int expected_seccfg_fail; + int group_pmp; + int lock_bypass; + int lock_once; + int m_mode_rwx; + int pmp_lock; + int pre_sec_mml; + int pre_sec_mmwp; + int revert_rwx; + int sec_mml; + int sec_mmwp; + int sec_rlb; + string tag; + + public: + pmp_csr_1_gen_class() : + addr_idx (0), addr_offset (0), cfg_idx (0), cfg_sub_idx (0), expected_pmpaddr_fail (0), expected_pmpcfg_fail (0), expected_seccfg_fail (0), group_pmp (0), lock_bypass (0), lock_once (0), m_mode_rwx (0), pmp_lock (0), pre_sec_mml (0), pre_sec_mmwp (0), revert_rwx (0), sec_mml (0), sec_mmwp (0), sec_rlb (0) + { + } + + pmp_csr_1_gen_class(int _addr_idx, int _addr_offset, int _cfg_idx, int _cfg_sub_idx, int _expected_pmpaddr_fail, int _expected_pmpcfg_fail, int _expected_seccfg_fail, int _group_pmp, int _lock_bypass, int _lock_once, int _m_mode_rwx, int _pmp_lock, int _pre_sec_mml, int _pre_sec_mmwp, int _revert_rwx, int _sec_mml, int _sec_mmwp, int _sec_rlb, const string &_tag) : + addr_idx (_addr_idx), addr_offset (_addr_offset), cfg_idx (_cfg_idx), cfg_sub_idx (_cfg_sub_idx), expected_pmpaddr_fail (_expected_pmpaddr_fail), expected_pmpcfg_fail (_expected_pmpcfg_fail), expected_seccfg_fail (_expected_seccfg_fail), group_pmp (_group_pmp), lock_bypass (_lock_bypass), lock_once (_lock_once), m_mode_rwx (_m_mode_rwx), pmp_lock (_pmp_lock), pre_sec_mml (_pre_sec_mml), pre_sec_mmwp (_pre_sec_mmwp), revert_rwx (_revert_rwx), sec_mml (_sec_mml), sec_mmwp (_sec_mmwp), sec_rlb (_sec_rlb), tag (_tag) + { + } + + void set_addr_idx(int _addr_idx) + { + addr_idx = _addr_idx; + } + + void set_addr_offset(int _addr_offset) + { + addr_offset = _addr_offset; + } + + void set_cfg_idx(int _cfg_idx) + { + cfg_idx = _cfg_idx; + } + + void set_cfg_sub_idx(int _cfg_sub_idx) + { + cfg_sub_idx = _cfg_sub_idx; + } + + void 
set_expected_pmpaddr_fail(int _expected_pmpaddr_fail) + { + expected_pmpaddr_fail = _expected_pmpaddr_fail; + } + + void set_expected_pmpcfg_fail(int _expected_pmpcfg_fail) + { + expected_pmpcfg_fail = _expected_pmpcfg_fail; + } + + void set_expected_seccfg_fail(int _expected_seccfg_fail) + { + expected_seccfg_fail = _expected_seccfg_fail; + } + + void set_group_pmp(int _group_pmp) + { + group_pmp = _group_pmp; + } + + void set_lock_bypass(int _lock_bypass) + { + lock_bypass = _lock_bypass; + } + + void set_lock_once(int _lock_once) + { + lock_once = _lock_once; + } + + void set_m_mode_rwx(int _m_mode_rwx) + { + m_mode_rwx = _m_mode_rwx; + } + + void set_pmp_lock(int _pmp_lock) + { + pmp_lock = _pmp_lock; + } + + void set_pre_sec_mml(int _pre_sec_mml) + { + pre_sec_mml = _pre_sec_mml; + } + + void set_pre_sec_mmwp(int _pre_sec_mmwp) + { + pre_sec_mmwp = _pre_sec_mmwp; + } + + void set_revert_rwx(int _revert_rwx) + { + revert_rwx = _revert_rwx; + } + + void set_sec_mml(int _sec_mml) + { + sec_mml = _sec_mml; + } + + void set_sec_mmwp(int _sec_mmwp) + { + sec_mmwp = _sec_mmwp; + } + + void set_sec_rlb(int _sec_rlb) + { + sec_rlb = _sec_rlb; + } + + void set_tag(const string &_tag) + { + tag = _tag; + } + + void generate_pmp_csr_1(ostream &stream, unsigned int indent = 0) + { + string indent_str (indent, ' '); + indent = 0; + + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << tag; + stream << "\n"; + stream << indent_str; + stream << " * Generated from gen_pmp_test.cc and test_pmp_csr_1.cc_skel."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * This test program is verify the pmp CSR access when seccfg introduced."; + stream << "\n"; + stream << indent_str; + stream << " * It's expected to executed from M mode."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Remarks:"; + stream << "\n"; + stream << indent_str; + stream << " * - CSR protection for non-M mode access is assumed and not coverred."; + stream << "\n"; + stream << indent_str; + stream << " * - The access on invalid CSR index like pmpcfg1 for rv64 is not coverred."; + stream << "\n"; + stream << indent_str; + stream << " * - Executed on RV64 only."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Macros from encoding.h"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define MSTATUS_MPP 0x00001800"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_R 0x01"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_W 0x02"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_X 0x04"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_A 0x18"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_L 0x80"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_SHIFT 2"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if ((PMP_R | PMP_W | PMP_X) != 0x7)"; + stream << "\n"; + stream << indent_str; + stream << "#error unexpected"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << 
"\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_OFF 0x0"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_TOR 0x08"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NA4 0x10"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NAPOT 0x18"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MML 0x1"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MMWP 0x2"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_RLB 0x4"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_RW 1"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_FETCH 1"; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Whether rwx share single cfg for M mode"; + stream << "\n"; + stream << indent_str; + stream << " * When "; + stream << "@"; + stream << "set_sec_mml"; + stream << "@"; + stream << " set, it must be 0, otherwise unexpected exception"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define M_MODE_RWX "; + stream << m_mode_rwx; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_LOAD_ACCESS 0x5"; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_STORE_ACCESS 0x7"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long reg_t;"; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long uintptr_t;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * functions from syscalls.c"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#if PRINTF_SUPPORTED"; + stream << "\n"; + stream << indent_str; + stream << "int printf(const char* fmt, ...);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#define printf(...)"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "void __attribute__((noreturn)) tohost_exit(uintptr_t code);"; + stream << "\n"; + stream << indent_str; + stream << "void exit(int code);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * local status"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_START 0x200000"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_END 0x240000"; + stream << "\n"; + stream << indent_str; + stream << "#define U_MEM_END (TEST_MEM_END + 0x10000)"; + stream << "\n"; + stream << indent_str; + stream << "#define FAKE_ADDRESS 0x10000000"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_seccfg_fail = "; + stream << expected_seccfg_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_seccfg_fail = 0;"; + stream << "\n"; + 
stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_pmpaddr_fail = "; + stream << expected_pmpaddr_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_pmpaddr_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_pmpcfg_fail = "; + stream << expected_pmpcfg_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_pmpcfg_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult(void);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * override syscalls.c."; + stream << "\n"; + stream << indent_str; + stream << " * currently simply skip to nexp instruction"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])"; + stream << "\n"; + stream << indent_str; + stream << "{ "; + stream << "\n"; + stream << indent_str; + stream << " tohost_exit(1337);"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "void target_foo() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_test_arr\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "volatile unsigned char target_arr[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_umode\"), noinline))"; + stream << "\n"; + stream << indent_str; + stream << "void target_foo_U() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_umode\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "volatile unsigned char target_arr_U[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * On processor_t::reset():"; + stream << "\n"; + stream << indent_str; + stream << " * - set_csr(CSR_PMPADDR0, ~reg_t(0));"; + stream << "\n"; + stream << indent_str; + stream << " * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "static void set_cfg() {"; + stream << "\n"; + stream << indent_str; + stream << "#if 1 // "; + stream << "@"; + stream 
<< "set_rlb_at_start:int"; + stream << "@"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * set MSECCFG_RLB to avoid locked at start"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(MSECCFG_RLB));"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "//------------------------Set current status before the test target (CSR access)"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR."; + stream << "\n"; + stream << indent_str; + stream << " * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance."; + stream << "\n"; + stream << indent_str; + stream << " * Also use pmp3cfg for fixed U mode (U_MEM)."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr3, %0 \\n\" :: \"r\"(U_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr2, %0 \\n\" :: \"r\"(TEST_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr1, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"((TEST_MEM_START >> 3) - 1) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr6, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\"); // for data"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr5, %0 \\n\" :: \"r\"(0x80010000 >> 2) : \"memory\"); // for code"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr4, %0 \\n\" :: \"r\"(0x80000000 >> 2) : \"memory\"); // addr start"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = PMP_OFF;"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pre_sec_mml; + stream << ") { // need to set L bit for M mode code access"; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= PMP_L;"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " cfg1 |= ((PMP_L << 8) | (PMP_L << 16));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + 
indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " reg_t sub_cfg = PMP_R | PMP_W | PMP_X | PMP_TOR | (("; + stream << lock_once; + stream << " || "; + stream << pmp_lock; + stream << ") ? PMP_L : 0);"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= sub_cfg << 24; // for U_MEM"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= sub_cfg << 16; // for TEST_MEM"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (cfg1 << 32);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg1, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg1)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << lock_once; + stream << " != "; + stream << pmp_lock; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t lock_bits = (PMP_L << 16) | ((reg_t)PMP_L << 24); // avoid use (default) int type"; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pmp_lock; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(lock_bits)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " } else {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrc pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(lock_bits)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // set proc->state.mseccfg"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned seccfg_bits = ("; + stream << lock_bypass; + stream << " ? MSECCFG_RLB : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pre_sec_mml; + stream << " ? MSECCFG_MML : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pre_sec_mmwp; + stream << " ? 
MSECCFG_MMWP : 0);"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw 0x747, %0 \\n\"::\"r\"(seccfg_bits));"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "//------------------------Test target"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Need to separate pmp and seccfg access since pmplock_recorded status may be "; + stream << "\n"; + stream << indent_str; + stream << " * updated again when accessing pmpcfg."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " reg_t wval = 0, rval;"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << group_pmp; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, pmpaddr"; + stream << addr_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval));"; + stream << "\n"; + stream << indent_str; + stream << " // give a valid value for both NAPOT and TOR"; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << addr_idx; + stream << " == 0) {"; + stream << "\n"; + stream << indent_str; + stream << " wval = ((rval + 1) << 1) - 1; // NAPOT mask"; + stream << "\n"; + stream << indent_str; + stream << " } else {"; + stream << "\n"; + stream << indent_str; + stream << " wval = (rval << 1) + "; + stream << addr_offset; + stream << "; "; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr"; + stream << addr_idx; + stream << ", %1 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrr %0, pmpaddr"; + stream << addr_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(wval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " if (wval != rval) {"; + stream << "\n"; + stream << indent_str; + stream << " printf(\"pmpaddr"; + stream << addr_idx; + stream << " expects %lx vs %lx\\n\", wval, rval);"; + stream << "\n"; + stream << indent_str; + stream << " actual_pmpaddr_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // Update cfg0 to avoid changing idx other than "; + stream << cfg_sub_idx; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, pmpcfg"; + stream << cfg_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // reuse lock_once here since it's for RLB and independent with pmp_lock"; + stream << "\n"; + stream << indent_str; + stream << " wval = cfg0 ^ ((reg_t)("; + stream << revert_rwx; + stream << " | ("; + stream << lock_once; + stream << " ? 
PMP_L : 0)) << ("; + stream << cfg_sub_idx; + stream << " * 8));"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg"; + stream << cfg_idx; + stream << ", %1 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrr %0, pmpcfg"; + stream << cfg_idx; + stream << " \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(wval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " if (wval != rval) {"; + stream << "\n"; + stream << indent_str; + stream << " printf(\"pmpcfg expects %lx vs %lx\\n\", wval, rval);"; + stream << "\n"; + stream << indent_str; + stream << " actual_pmpcfg_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * need to set PMP_L for cfg0 otherwise next PC will illegal"; + stream << "\n"; + stream << indent_str; + stream << " * This is a little coverage hole for non-PMP_L + mml, which should be"; + stream << "\n"; + stream << indent_str; + stream << " * a restricted use case and can be accepted anyway."; + stream << "\n"; + stream << indent_str; + stream << " */ "; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << sec_mml; + stream << ") { "; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg0, %0 \\n\"::\"r\"(PMP_L));"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg0, %0 \\n\"::\"r\"(((reg_t)PMP_L << 40) | ((reg_t)PMP_L << 48)));"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs pmpcfg1, %0 \\n\"::\"r\"((PMP_L << 8) | (PMP_L << 16)));"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " wval = ("; + stream << sec_rlb; + stream << " ? MSECCFG_RLB : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << sec_mml; + stream << " ? MSECCFG_MML : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << sec_mmwp; + stream << " ? 
MSECCFG_MMWP : 0);"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw 0x747, %1 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrr %0, 0x747 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(rval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(wval)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t expected_val = wval & (MSECCFG_RLB | MSECCFG_MML | MSECCFG_MMWP);"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * pre_sec_mml means cfg0 locked"; + stream << "\n"; + stream << indent_str; + stream << " * pmp_lock means cfg2/3 locked"; + stream << "\n"; + stream << indent_str; + stream << " * sec_mml is the test coverage hole just mentioned"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " if (("; + stream << pre_sec_mml; + stream << " || "; + stream << pmp_lock; + stream << " || "; + stream << sec_mml; + stream << ") "; + stream << "\n"; + stream << indent_str; + stream << " && "; + stream << lock_bypass; + stream << " == 0) {"; + stream << "\n"; + stream << indent_str; + stream << " expected_val &= ~MSECCFG_RLB;"; + stream << "\n"; + stream << indent_str; + stream << " } "; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pre_sec_mml; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " expected_val |= MSECCFG_MML;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << pre_sec_mmwp; + stream << ") {"; + stream << "\n"; + stream << indent_str; + stream << " expected_val |= MSECCFG_MMWP;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (expected_val != rval) actual_seccfg_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult() {"; + stream << "\n"; + stream << indent_str; + stream << " int ret = 0;"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_seccfg_fail != actual_seccfg_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (expected_pmpaddr_fail != actual_pmpaddr_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 2;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (expected_pmpcfg_fail != actual_pmpcfg_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " exit(ret); "; + stream << "\n"; + stream << 
indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "int main() {"; + stream << "\n"; + stream << indent_str; + stream << " // assert in M mode"; + stream << "\n"; + stream << indent_str; + stream << " set_cfg();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " return 0; // assert 0"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + } +}; + +#endif // PMP_CSR_1_GEN_CLASS_H diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel new file mode 100644 index 00000000..7d10c502 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.cc_skel @@ -0,0 +1,344 @@ + +/* + * @tag@ + * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel. + * + * This test program is expected to start executed from M mode. + * That will be easier for us to deal with pmp exception for test. + * + * Remarks: + * - RW=01 not covered. U/M mode share will be tested separately + * - RLB is always 0. CSR access control will be tested separately + * + * @@changed 2020-Mar-2 soberl + * For RWXL + MML, need to separate R and W combinations. + * Skip RW=01 (share mode) at generator driver side. + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW 1 +#define TEST_FETCH 1 +/* + * Whether rwx share single cfg for M mode + * When @@set_sec_mml@@ set, it must be 0, otherwise unexpected exception + */ +#define M_MODE_RWX @m_mode_rwx:int@ + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_rw_fail = @expected_rw_fail:int@; +static unsigned actual_rw_fail = 0; + +static const unsigned long expected_x_fail = @expected_x_fail:int@; +static unsigned actual_x_fail = 0; +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + actual_rw_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +// switch (eret) to U mode and resume next PC +static void switch_to_U() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +static void switch_mode() { +#if @switch_u_mode:int@ + switch_to_U(); +#endif +} + +__attribute ((noinline)) +static void target_foo() { + asm volatile ("nop"); + + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +static int detect_pmp_granularity(){ + unsigned int granule; + unsigned long int temp_reg; + unsigned long int all_ones = ~0x0UL; + + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(all_ones) : "memory"); + asm volatile ("csrr %0, pmpaddr0 \n" : "=r"(temp_reg)); + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"(0x0) : "memory"); + + int g = 2; + for(uintptr_t i = 1; i; i<<=1) { + if((temp_reg & i) != 0) + break; + g++; + } + granule = 1UL << g; + + return granule; +} + +static int mismatch_addr_offset(int granule_size){ + unsigned int addr_offset = @pmp_addr_offset:int@; + + if (addr_offset == 0x0){ + return 0x0; + } + else { + unsigned int mismatch_offset = granule_size; + while (mismatch_offset < addr_offset){ + mismatch_offset = mismatch_offset << 0x1; + } + return mismatch_offset; + } +} + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + * + * Here @@pmp_addr_offset:int@@ is to create an address mismatch + * And @@create_pmp_cfg:int@@ is to create cfg mismatch. 
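+ * A non-zero @pmp_addr_offset:int@ is first scaled up to at least the PMP
+ * granularity detected at runtime (detect_pmp_granularity() followed by
+ * mismatch_addr_offset()), so the intended pmpaddr1 mismatch is not rounded
+ * away on implementations with a coarser grain.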
+ */ + + unsigned int mismatch_offset = @pmp_addr_offset:int@; + + if (mismatch_offset != 0x0){ + volatile int pmp_granularity = detect_pmp_granularity(); + mismatch_offset = mismatch_addr_offset(pmp_granularity); + } + + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START + mismatch_offset) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // Only true for Spike +// asm volatile ("csrr %0, pmpcfg0\n":"=r"(cfg0)); +// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) { +// exit(cfg0); +// } + + if (@set_sec_mml:int@) { // need to set L bit for M mode code like trap_handling +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + } + + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM +#if @create_pmp_cfg:int@ + cfg0 |= ( (@pmp_r:int@ ? PMP_R : 0) + | (@pmp_w:int@ ? PMP_W : 0) + | (@pmp_x:int@ ? PMP_X : 0) + | PMP_TOR | (@pmp_l:int@ ? PMP_L : 0)) << 16; +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = (@set_sec_mml:int@ ? MSECCFG_MML : 0) | (@set_sec_mmwp:int@ ? MSECCFG_MMWP : 0); + if (seccfg_bits) { + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + } + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_rw_fail = 1; + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. 
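+ * FAKE_ADDRESS lies outside every PMP region configured above, so this store
+ * raises a store access fault in U mode; handle_trap() recognizes
+ * mtval == FAKE_ADDRESS and calls checkTestResult().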
+ */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_rw_fail != actual_rw_fail) { + ret += 1; + } + + if (expected_x_fail != actual_x_fail) { + ret += 2; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + switch_mode(); // in case swith to u mode, branch to try_access_umode directly + + try_access(); + + checkTestResult(); + return 0; // assert 0 +} diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h new file mode 100644 index 00000000..2e004f6e --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_1.h @@ -0,0 +1,1177 @@ +/* + * File automatically generated by + * gengen 1.4.2 by Lorenzo Bettini + * http://www.gnu.org/software/gengen + */ + +#ifndef PMP_OK_1_GEN_CLASS_H +#define PMP_OK_1_GEN_CLASS_H + +#include +#include + +using std::string; +using std::ostream; + +class pmp_ok_1_gen_class +{ + protected: + int create_pmp_cfg; + int expected_rw_fail; + int expected_x_fail; + int m_mode_rwx; + int pmp_addr_offset; + int pmp_l; + int pmp_r; + int pmp_w; + int pmp_x; + int set_sec_mml; + int set_sec_mmwp; + int switch_u_mode; + string tag; + + public: + pmp_ok_1_gen_class() : + create_pmp_cfg (0), expected_rw_fail (0), expected_x_fail (0), m_mode_rwx (0), pmp_addr_offset (0), pmp_l (0), pmp_r (0), pmp_w (0), pmp_x (0), set_sec_mml (0), set_sec_mmwp (0), switch_u_mode (0) + { + } + + pmp_ok_1_gen_class(int _create_pmp_cfg, int _expected_rw_fail, int _expected_x_fail, int _m_mode_rwx, int _pmp_addr_offset, int _pmp_l, int _pmp_r, int _pmp_w, int _pmp_x, int _set_sec_mml, int _set_sec_mmwp, int _switch_u_mode, const string &_tag) : + create_pmp_cfg (_create_pmp_cfg), expected_rw_fail (_expected_rw_fail), expected_x_fail (_expected_x_fail), m_mode_rwx (_m_mode_rwx), pmp_addr_offset (_pmp_addr_offset), pmp_l (_pmp_l), pmp_r (_pmp_r), pmp_w (_pmp_w), pmp_x (_pmp_x), set_sec_mml (_set_sec_mml), set_sec_mmwp (_set_sec_mmwp), switch_u_mode (_switch_u_mode), tag (_tag) + { + } + + void set_create_pmp_cfg(int _create_pmp_cfg) + { + create_pmp_cfg = _create_pmp_cfg; + } + + void set_expected_rw_fail(int _expected_rw_fail) + { + expected_rw_fail = _expected_rw_fail; + } + + void set_expected_x_fail(int _expected_x_fail) + { + expected_x_fail = _expected_x_fail; + } + + void set_m_mode_rwx(int _m_mode_rwx) + { + m_mode_rwx = _m_mode_rwx; + } + + void set_pmp_addr_offset(int _pmp_addr_offset) + { + pmp_addr_offset = _pmp_addr_offset; + } + + void set_pmp_l(int _pmp_l) + { + pmp_l = _pmp_l; + } + + void set_pmp_r(int _pmp_r) + { + pmp_r = _pmp_r; + } + + void set_pmp_w(int _pmp_w) + { + pmp_w = _pmp_w; + } + + void set_pmp_x(int _pmp_x) + { + pmp_x = _pmp_x; + } + + void set_set_sec_mml(int _set_sec_mml) + { + set_sec_mml = _set_sec_mml; + } + + void set_set_sec_mmwp(int _set_sec_mmwp) + { + set_sec_mmwp = _set_sec_mmwp; + } + + void set_switch_u_mode(int _switch_u_mode) + { + switch_u_mode = _switch_u_mode; + } + + void set_tag(const string &_tag) + { + tag = _tag; + } + + void generate_pmp_ok_1(ostream &stream, unsigned int indent = 0) + { + string indent_str (indent, ' '); + indent = 0; + + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << tag; + stream << "\n"; + stream << indent_str; + stream << " * Generated from gen_pmp_test.cc and test_pmp_ok_1.cc_skel."; + stream << "\n"; + stream << 
indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * This test program is expected to start executed from M mode."; + stream << "\n"; + stream << indent_str; + stream << " * That will be easier for us to deal with pmp exception for test."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Remarks:"; + stream << "\n"; + stream << indent_str; + stream << " * - RW=01 not covered. U/M mode share will be tested separately"; + stream << "\n"; + stream << indent_str; + stream << " * - RLB is always 0. CSR access control will be tested separately"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "@"; + stream << "changed 2020-Mar-2 soberl"; + stream << "\n"; + stream << indent_str; + stream << " * For RWXL + MML, need to separate R and W combinations."; + stream << "\n"; + stream << indent_str; + stream << " * Skip RW=01 (share mode) at generator driver side."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Macros from encoding.h"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define MSTATUS_MPP 0x00001800"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_R 0x01"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_W 0x02"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_X 0x04"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_A 0x18"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_L 0x80"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_SHIFT 2"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_OFF 0x0"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_TOR 0x08"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NA4 0x10"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NAPOT 0x18"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MML 0x1"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MMWP 0x2"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_RLB 0x4"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_RW 1"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_FETCH 1"; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Whether rwx share single cfg for M mode"; + stream << "\n"; + stream << indent_str; + stream << " * When "; + stream << "@"; + stream << "set_sec_mml"; + stream << "@"; + stream << " set, it must be 0, otherwise unexpected exception"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define M_MODE_RWX "; + stream << m_mode_rwx; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_LOAD_ACCESS 0x5"; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_STORE_ACCESS 0x7"; + stream << "\n"; + stream << 
indent_str; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long reg_t;"; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long uintptr_t;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * functions from syscalls.c"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#if PRINTF_SUPPORTED"; + stream << "\n"; + stream << indent_str; + stream << "int printf(const char* fmt, ...);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#define printf(...)"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "void __attribute__((noreturn)) tohost_exit(uintptr_t code);"; + stream << "\n"; + stream << indent_str; + stream << "void exit(int code);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * local status"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_START 0x200000"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_END 0x240000"; + stream << "\n"; + stream << indent_str; + stream << "#define U_MEM_END (TEST_MEM_END + 0x10000)"; + stream << "\n"; + stream << indent_str; + stream << "#define FAKE_ADDRESS 0x10000000"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_rw_fail = "; + stream << expected_rw_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_rw_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_x_fail = "; + stream << expected_x_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult(void);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * override syscalls.c."; + stream << "\n"; + stream << indent_str; + stream << " * currently simply skip to nexp instruction"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])"; + stream << "\n"; + stream << indent_str; + stream << "{"; + stream << "\n"; + stream << indent_str; + stream << " if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t addr;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, mtval\\n\" : \"=r\"(addr));"; + stream << "\n"; + stream << indent_str; + stream << 
" if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " return epc + 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 8; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (addr == FAKE_ADDRESS) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " printf(\"cause = %ld, epc = 0x%lx\\n\", cause, epc);"; + stream << "\n"; + stream << indent_str; + stream << " tohost_exit(1337);"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// switch (eret) to U mode and resume next PC"; + stream << "\n"; + stream << indent_str; + stream << "static void switch_to_U() {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t tmp;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile ("; + stream << "\n"; + stream << indent_str; + stream << " \"li %0, %1\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrc mstatus, t0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tla %0, try_access_umode \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrw mepc, %0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tli sp, %2\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tmret\\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(tmp) : \"n\"(MSTATUS_MPP), \"n\"(U_MEM_END) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void switch_mode() {"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << switch_u_mode; + stream << "\n"; + stream << indent_str; + stream << " switch_to_U();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void target_foo() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * avoid to access actual_x_fail lies in M mode"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_test_foo\"), noinline))"; + stream << "\n"; + stream 
<< indent_str; + stream << "static void target_foo_umode() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_test_arr\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "static volatile unsigned char target_arr[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static int detect_pmp_granularity(){"; + stream << "\n"; + stream << indent_str; + stream << " unsigned int granule;"; + stream << "\n"; + stream << indent_str; + stream << " unsigned long int temp_reg;"; + stream << "\n"; + stream << indent_str; + stream << " unsigned long int all_ones = ~0x0UL;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"(all_ones) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, pmpaddr0 \\n\" : \"=r\"(temp_reg));"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"(0x0) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " int g = 2;"; + stream << "\n"; + stream << indent_str; + stream << " for(uintptr_t i = 1; i; i<<=1) {"; + stream << "\n"; + stream << indent_str; + stream << " if((temp_reg & i) != 0)"; + stream << "\n"; + stream << indent_str; + stream << " break;"; + stream << "\n"; + stream << indent_str; + stream << " g++;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " granule = 1UL << g;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " return granule;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static int mismatch_addr_offset(int granule_size){"; + stream << "\n"; + stream << indent_str; + stream << " unsigned int addr_offset = "; + stream << pmp_addr_offset; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (addr_offset == 0x0){"; + stream << "\n"; + stream << indent_str; + stream << " return 0x0;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " else {"; + stream << "\n"; + stream << indent_str; + stream << " unsigned int mismatch_offset = granule_size;"; + stream << "\n"; + stream << indent_str; + stream << " while (mismatch_offset < addr_offset){"; + stream << "\n"; + stream << indent_str; + stream << " mismatch_offset = mismatch_offset << 0x1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " return mismatch_offset;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * On processor_t::reset():"; + stream << "\n"; + stream << indent_str; + stream << " * - 
set_csr(CSR_PMPADDR0, ~reg_t(0));"; + stream << "\n"; + stream << indent_str; + stream << " * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "static void set_cfg() {"; + stream << "\n"; + stream << indent_str; + stream << "#if 1"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * set MSECCFG_RLB to avoid locked"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " unsigned rlb_value = MSECCFG_RLB;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(rlb_value));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR."; + stream << "\n"; + stream << indent_str; + stream << " * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance."; + stream << "\n"; + stream << indent_str; + stream << " * Also use pmp3cfg for fixed U mode (U_MEM)."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Here "; + stream << "@"; + stream << "pmp_addr_offset:int"; + stream << "@"; + stream << " is to create an address mismatch"; + stream << "\n"; + stream << indent_str; + stream << " * And "; + stream << "@"; + stream << "create_pmp_cfg:int"; + stream << "@"; + stream << " is to create cfg mismatch."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " unsigned int mismatch_offset = "; + stream << pmp_addr_offset; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (mismatch_offset != 0x0){"; + stream << "\n"; + stream << indent_str; + stream << " volatile int pmp_granularity = detect_pmp_granularity();"; + stream << "\n"; + stream << indent_str; + stream << " mismatch_offset = mismatch_addr_offset(pmp_granularity);"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr3, %0 \\n\" :: \"r\"(U_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr2, %0 \\n\" :: \"r\"(TEST_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr1, %0 \\n\" :: \"r\"((TEST_MEM_START + mismatch_offset) >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"((TEST_MEM_START >> 3) - 1) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr6, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\"); // for data"; + 
stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr5, %0 \\n\" :: \"r\"(0x80010000 >> 2) : \"memory\"); // for code"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr4, %0 \\n\" :: \"r\"(0x80000000 >> 2) : \"memory\"); // addr start"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = PMP_OFF;"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // Only true for Spike"; + stream << "\n"; + stream << indent_str; + stream << "// asm volatile (\"csrr %0, pmpcfg0\\n\":\"=r\"(cfg0)); "; + stream << "\n"; + stream << indent_str; + stream << "// if (cfg0 != (PMP_R | PMP_W | PMP_X | PMP_NAPOT)) {"; + stream << "\n"; + stream << indent_str; + stream << "// exit(cfg0);"; + stream << "\n"; + stream << indent_str; + stream << "// }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if ("; + stream << set_sec_mml; + stream << ") { // need to set L bit for M mode code like trap_handling"; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= PMP_L;"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " cfg1 |= ((PMP_L << 8) | (PMP_L << 16));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << create_pmp_cfg; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= ( ("; + stream << pmp_r; + stream << " ? PMP_R : 0)"; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_w; + stream << " ? PMP_W : 0)"; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_x; + stream << " ? PMP_X : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | PMP_TOR | ("; + stream << pmp_l; + stream << " ? 
PMP_L : 0)) << 16;"; + stream << "\n"; + stream << indent_str; + stream << "#endif "; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (cfg1 << 32);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg1, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg1)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // set proc->state.mseccfg, for MML/MMWP"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned seccfg_bits = ("; + stream << set_sec_mml; + stream << " ? MSECCFG_MML : 0) | ("; + stream << set_sec_mmwp; + stream << " ? MSECCFG_MMWP : 0);"; + stream << "\n"; + stream << indent_str; + stream << " if (seccfg_bits) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(seccfg_bits));"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // currently dummy since tlb flushed when set_csr on mseccfg"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"fence.i \\n\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// from pmp_ok() side,W/R/X is similar"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void try_access() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned long delta = (unsigned long)0x1020304005060708ULL;"; + stream << "\n"; + stream << indent_str; + stream << " *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << " actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " 
actual_x_fail = 1; // reset inside target_foo()"; + stream << "\n"; + stream << indent_str; + stream << " target_foo();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// in case mml set, printf cannot be used in U mode"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_umode\")))"; + stream << "\n"; + stream << indent_str; + stream << "void try_access_umode() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << "// const unsigned long delta = 0x1020304005060708UL;"; + stream << "\n"; + stream << indent_str; + stream << "// *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << "// actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << "// }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " target_foo_umode();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * switch to M mode by invoking a write access fault for special address."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);"; + stream << "\n"; + stream << indent_str; + stream << " *p = 1;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult() {"; + stream << "\n"; + stream << indent_str; + stream << " int ret = 0;"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_rw_fail != actual_rw_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (expected_x_fail != actual_x_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 2;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " exit(ret); "; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "int main() {"; + stream << "\n"; + stream << indent_str; + stream << " // assert in M mode"; + stream << "\n"; + stream << indent_str; + stream << " set_cfg();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " switch_mode(); // in case swith to u mode, branch to 
try_access_umode directly"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " try_access();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " return 0; // assert 0"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + } +}; + +#endif // PMP_OK_1_GEN_CLASS_H diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel new file mode 100644 index 00000000..ca23eb30 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.cc_skel @@ -0,0 +1,295 @@ + +/* + * @tag@ + * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel. + * + * This test program tests pmp_ok() in share mode (RW=01). + * Based on the other test cases for the mseccfg sticky bits, this test expects the following: + * - RW = 01. For RW != 01, fewer combinations show the failure. + * - MML set + * - Region matched. + * + * Remarks: + * - + */ + +/* + * Macros from encoding.h + */ +#define MSTATUS_MPP 0x00001800 + +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_L 0x80 +#define PMP_SHIFT 2 + +#define PMP_OFF 0x0 +#define PMP_TOR 0x08 +#define PMP_NA4 0x10 +#define PMP_NAPOT 0x18 + +#define MSECCFG_MML 0x1 +#define MSECCFG_MMWP 0x2 +#define MSECCFG_RLB 0x4 + +#define TEST_RW (1 - @typex:int@) +#define TEST_FETCH (@typex:int@) +/* + * Whether rwx share a single cfg for M mode. + * When @@set_sec_mml@@ is set, it must be 0; otherwise an unexpected exception occurs. + */ +#define M_MODE_RWX @m_mode_rwx:int@ + +#define CAUSE_LOAD_ACCESS 0x5 +#define CAUSE_STORE_ACCESS 0x7 + +typedef unsigned long reg_t; +typedef unsigned long uintptr_t; + +/* + * functions from syscalls.c + */ +#if PRINTF_SUPPORTED +int printf(const char* fmt, ...); +#else +#define printf(...) +#endif + +void __attribute__((noreturn)) tohost_exit(uintptr_t code); +void exit(int code); + +/* + * local status + */ +#define TEST_MEM_START 0x200000 +#define TEST_MEM_END 0x240000 +#define U_MEM_END (TEST_MEM_END + 0x10000) +#define FAKE_ADDRESS 0x10000000 + +static const unsigned long expected_r_fail = @expected_r_fail:int@; +static unsigned actual_r_fail = 0; + +static const unsigned long expected_w_fail = @expected_w_fail:int@; +static unsigned actual_w_fail = 0; + +static const unsigned long expected_x_fail = @expected_x_fail:int@; +static unsigned actual_x_fail = 0; + +static void checkTestResult(void); + +/* + * override syscalls.c. 
+ * currently simply skip to nexp instruction + */ +uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + if (epc >= TEST_MEM_START && epc < TEST_MEM_END) { + asm volatile ("nop"); + actual_x_fail = 1; + checkTestResult(); + } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) { + reg_t addr; + asm volatile ("csrr %0, mtval\n" : "=r"(addr)); + if (addr >= TEST_MEM_START && addr < TEST_MEM_END) { + if (cause == CAUSE_LOAD_ACCESS) + actual_r_fail = 1; + else + actual_w_fail = 1; + return epc + 4; + } + + if (addr == FAKE_ADDRESS) { + asm volatile ("nop"); + asm volatile ("nop"); + checkTestResult(); + } + } + + printf("cause = %ld, epc = 0x%lx\n", cause, epc); + tohost_exit(1337); +} + + +static void switch_mode_access() { + reg_t tmp; + asm volatile ( + "li %0, %1\n" + "\tcsrc mstatus, t0\n" + "\tla %0, try_access_umode \n" + "\tcsrw mepc, %0\n" + "\tli sp, %2\n" + "\tmret\n" + : "=r"(tmp) : "n"(MSTATUS_MPP), "n"(U_MEM_END) : "memory"); +} + +__attribute ((section(".text_test_foo"), noinline)) +static void target_foo() { + asm volatile ("nop"); + actual_x_fail = 0; +} + +/* + * avoid to access actual_x_fail lies in M mode + */ +__attribute ((noinline)) +static void target_foo_umode() { + asm volatile ("nop"); +} + +__attribute ((section(".data_test_arr"), aligned(8))) +static volatile unsigned char target_arr[100] = { + 1,2,3,4,5,6,7,8, +}; + +/* + * On processor_t::reset(): + * - set_csr(CSR_PMPADDR0, ~reg_t(0)); + * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT); + */ +static void set_cfg() { +#if 1 + /* + * set MSECCFG_RLB to avoid locked + */ + unsigned rlb_value = MSECCFG_RLB; + asm volatile ("csrs 0x747, %0 \n"::"r"(rlb_value)); +#endif + + /* + * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR. + * Then use pmp2cfg for TEST_MEM. Both test code and data share PMP entrance. + * Also use pmp3cfg for fixed U mode (U_MEM). + */ + asm volatile ("csrw pmpaddr3, %0 \n" :: "r"(U_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr2, %0 \n" :: "r"(TEST_MEM_END >> 2) : "memory"); + asm volatile ("csrw pmpaddr1, %0 \n" :: "r"((TEST_MEM_START) >> 2) : "memory"); + +#if M_MODE_RWX + asm volatile ("csrw pmpaddr0, %0 \n" :: "r"((TEST_MEM_START >> 3) - 1) : "memory"); + reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT); +#else + asm volatile ("csrw pmpaddr6, %0 \n" :: "r"(TEST_MEM_START >> 2) : "memory"); // for data + asm volatile ("csrw pmpaddr5, %0 \n" :: "r"(0x80010000 >> 2) : "memory"); // for code + asm volatile ("csrw pmpaddr4, %0 \n" :: "r"(0x80000000 >> 2) : "memory"); // addr start + reg_t cfg0 = PMP_OFF; + reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8); +#endif + + // need to set L bit for M mode before set MML +#if M_MODE_RWX + cfg0 |= PMP_L; +#else + cfg1 |= ((PMP_L << 8) | (PMP_L << 16)); +#endif + +#if !M_MODE_RWX +#if __riscv_xlen == 64 + cfg0 |= (cfg1 << 32); +#else + asm volatile ("csrw pmpcfg1, %0 \n" + : + : "r"(cfg1) + : "memory"); +#endif // __riscv_xlen == 64 +#endif // !M_MODE_RWX + + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + // set proc->state.mseccfg, for MML/MMWP + const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP; + asm volatile ("csrs 0x747, %0 \n"::"r"(seccfg_bits)); + + // after set MML, RW=01 is possible + cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM + cfg0 |= ((@pmp_r:int@ ? PMP_R : 0) // for TEST_MEM + | PMP_W + | (@pmp_x:int@ ? PMP_X : 0) + | (@pmp_l:int@ ? 
PMP_L : 0) + | PMP_TOR) << 16; + asm volatile ("csrw pmpcfg0, %0 \n" + : + : "r"(cfg0) + : "memory"); + + // currently dummy since tlb flushed when set_csr on mseccfg + asm volatile ("fence.i \n"); +} + +// from pmp_ok() side,W/R/X is similar +__attribute ((noinline)) +static void try_access() { +#if TEST_RW + target_arr[0] += 1; + const unsigned long delta = (unsigned long)0x1020304005060708ULL; + *(long *)target_arr += delta; + + if (actual_r_fail == 0 && actual_w_fail == 0) { + if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) { + actual_r_fail = 1; + actual_w_fail = 1; + } + } +#endif + +#if TEST_FETCH + actual_x_fail = 1; // reset inside target_foo() + target_foo(); +#endif +} + +// in case mml set, printf cannot be used in U mode +__attribute ((section(".text_umode"))) +void try_access_umode() { +#if TEST_RW + target_arr[0] += 1; +// const unsigned long delta = 0x1020304005060708UL; +// *(long *)target_arr += delta; + +// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) { +// actual_rw_fail = 1; +// } +#endif + +#if TEST_FETCH + target_foo_umode(); +#endif + + /* + * switch to M mode by invoking a write access fault for special address. + */ + volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS); + *p = 1; +} + +static void checkTestResult() { + int ret = 0; + if (expected_r_fail != actual_r_fail) { + ret += 1; + } + if (expected_w_fail != actual_w_fail) { + ret += 2; + } + if (expected_x_fail != actual_x_fail) { + ret += 4; + } + + + exit(ret); +} + +int main() { + // assert in M mode + set_cfg(); + + try_access(); +#if @enable_umode_test:int@ + switch_mode_access(); // access in umode and report final result +#else + checkTestResult(); +#endif + return 0; // assert 0 +} diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h new file mode 100644 index 00000000..7fb51808 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_src/test_pmp_ok_share_1.h @@ -0,0 +1,997 @@ +/* + * File automatically generated by + * gengen 1.4.2 by Lorenzo Bettini + * http://www.gnu.org/software/gengen + */ + +#ifndef PMP_OK_SHARE_1_GEN_CLASS_H +#define PMP_OK_SHARE_1_GEN_CLASS_H + +#include +#include + +using std::string; +using std::ostream; + +class pmp_ok_share_1_gen_class +{ + protected: + int enable_umode_test; + int expected_r_fail; + int expected_w_fail; + int expected_x_fail; + int m_mode_rwx; + int pmp_l; + int pmp_r; + int pmp_x; + string tag; + int typex; + + public: + pmp_ok_share_1_gen_class() : + enable_umode_test (0), expected_r_fail (0), expected_w_fail (0), expected_x_fail (0), m_mode_rwx (0), pmp_l (0), pmp_r (0), pmp_x (0), typex (0) + { + } + + pmp_ok_share_1_gen_class(int _enable_umode_test, int _expected_r_fail, int _expected_w_fail, int _expected_x_fail, int _m_mode_rwx, int _pmp_l, int _pmp_r, int _pmp_x, const string &_tag, int _typex) : + enable_umode_test (_enable_umode_test), expected_r_fail (_expected_r_fail), expected_w_fail (_expected_w_fail), expected_x_fail (_expected_x_fail), m_mode_rwx (_m_mode_rwx), pmp_l (_pmp_l), pmp_r (_pmp_r), pmp_x (_pmp_x), tag (_tag), typex (_typex) + { + } + + void set_enable_umode_test(int _enable_umode_test) + { + enable_umode_test = _enable_umode_test; + } + + void set_expected_r_fail(int _expected_r_fail) + { + expected_r_fail = _expected_r_fail; + } + + void set_expected_w_fail(int _expected_w_fail) + { + expected_w_fail = _expected_w_fail; + } + + void set_expected_x_fail(int 
_expected_x_fail) + { + expected_x_fail = _expected_x_fail; + } + + void set_m_mode_rwx(int _m_mode_rwx) + { + m_mode_rwx = _m_mode_rwx; + } + + void set_pmp_l(int _pmp_l) + { + pmp_l = _pmp_l; + } + + void set_pmp_r(int _pmp_r) + { + pmp_r = _pmp_r; + } + + void set_pmp_x(int _pmp_x) + { + pmp_x = _pmp_x; + } + + void set_tag(const string &_tag) + { + tag = _tag; + } + + void set_typex(int _typex) + { + typex = _typex; + } + + void generate_pmp_ok_share_1(ostream &stream, unsigned int indent = 0) + { + string indent_str (indent, ' '); + indent = 0; + + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << tag; + stream << "\n"; + stream << indent_str; + stream << " * Generated from gen_pmp_test.cc and test_pmp_ok_share_1.cc_skel."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * This test program is to test pmp_ok() when share mode (RW=01)."; + stream << "\n"; + stream << indent_str; + stream << " * Based on other test cases for mseccfg stiky bits, this test expects following:"; + stream << "\n"; + stream << indent_str; + stream << " * - RW = 01. For RW != 01, less combinations to show it fail."; + stream << "\n"; + stream << indent_str; + stream << " * - MML set"; + stream << "\n"; + stream << indent_str; + stream << " * - Regine matched."; + stream << "\n"; + stream << indent_str; + stream << " * "; + stream << "\n"; + stream << indent_str; + stream << " * Remarks:"; + stream << "\n"; + stream << indent_str; + stream << " * - "; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Macros from encoding.h"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define MSTATUS_MPP 0x00001800"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_R 0x01"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_W 0x02"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_X 0x04"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_A 0x18"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_L 0x80"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_SHIFT 2"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_OFF 0x0"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_TOR 0x08"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NA4 0x10"; + stream << "\n"; + stream << indent_str; + stream << "#define PMP_NAPOT 0x18"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MML 0x1"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_MMWP 0x2"; + stream << "\n"; + stream << indent_str; + stream << "#define MSECCFG_RLB 0x4"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_RW (1 - "; + stream << typex; + stream << ")"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_FETCH ("; + stream << typex; + stream << ")"; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * Whether rwx share single 
cfg for M mode"; + stream << "\n"; + stream << indent_str; + stream << " * When "; + stream << "@"; + stream << "set_sec_mml"; + stream << "@"; + stream << " set, it must be 0, otherwise unexpected exception"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define M_MODE_RWX "; + stream << m_mode_rwx; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_LOAD_ACCESS 0x5"; + stream << "\n"; + stream << indent_str; + stream << "#define CAUSE_STORE_ACCESS 0x7"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long reg_t;"; + stream << "\n"; + stream << indent_str; + stream << "typedef unsigned long uintptr_t;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * functions from syscalls.c"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#if PRINTF_SUPPORTED"; + stream << "\n"; + stream << indent_str; + stream << "int printf(const char* fmt, ...);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << "#define printf(...)"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "void __attribute__((noreturn)) tohost_exit(uintptr_t code);"; + stream << "\n"; + stream << indent_str; + stream << "void exit(int code);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * local status"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_START 0x200000"; + stream << "\n"; + stream << indent_str; + stream << "#define TEST_MEM_END 0x240000"; + stream << "\n"; + stream << indent_str; + stream << "#define U_MEM_END (TEST_MEM_END + 0x10000)"; + stream << "\n"; + stream << indent_str; + stream << "#define FAKE_ADDRESS 0x10000000"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_r_fail = "; + stream << expected_r_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_r_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_w_fail = "; + stream << expected_w_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_w_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static const unsigned long expected_x_fail = "; + stream << expected_x_fail; + stream << ";"; + stream << "\n"; + stream << indent_str; + stream << "static unsigned actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult(void);"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * override syscalls.c."; + stream << "\n"; + stream << indent_str; + stream << " * currently simply skip to nexp instruction"; + stream << "\n"; + 
stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "uintptr_t handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32])"; + stream << "\n"; + stream << indent_str; + stream << "{"; + stream << "\n"; + stream << indent_str; + stream << " if (epc >= TEST_MEM_START && epc < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " } else if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t addr;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrr %0, mtval\\n\" : \"=r\"(addr));"; + stream << "\n"; + stream << indent_str; + stream << " if (addr >= TEST_MEM_START && addr < TEST_MEM_END) {"; + stream << "\n"; + stream << indent_str; + stream << " if (cause == CAUSE_LOAD_ACCESS)"; + stream << "\n"; + stream << indent_str; + stream << " actual_r_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " else "; + stream << "\n"; + stream << indent_str; + stream << " actual_w_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " return epc + 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 8; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " if (addr == FAKE_ADDRESS) {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " printf(\"cause = %ld, epc = 0x%lx\\n\", cause, epc);"; + stream << "\n"; + stream << indent_str; + stream << " tohost_exit(1337);"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void switch_mode_access() {"; + stream << "\n"; + stream << indent_str; + stream << " reg_t tmp;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile ("; + stream << "\n"; + stream << indent_str; + stream << " \"li %0, %1\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrc mstatus, t0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tla %0, try_access_umode \\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tcsrw mepc, %0\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tli sp, %2\\n\""; + stream << "\n"; + stream << indent_str; + stream << " \"\\tmret\\n\""; + stream << "\n"; + stream << indent_str; + stream << " : \"=r\"(tmp) : \"n\"(MSTATUS_MPP), \"n\"(U_MEM_END) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_test_foo\"), noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void target_foo() {"; + stream << "\n"; + 
stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 0;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * avoid to access actual_x_fail lies in M mode"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void target_foo_umode() {"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"nop\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".data_test_arr\"), aligned(8)))"; + stream << "\n"; + stream << indent_str; + stream << "static volatile unsigned char target_arr[100] = {"; + stream << "\n"; + stream << indent_str; + stream << " 1,2,3,4,5,6,7,8,"; + stream << "\n"; + stream << indent_str; + stream << "};"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "/*"; + stream << "\n"; + stream << indent_str; + stream << " * On processor_t::reset():"; + stream << "\n"; + stream << indent_str; + stream << " * - set_csr(CSR_PMPADDR0, ~reg_t(0));"; + stream << "\n"; + stream << indent_str; + stream << " * set_csr(CSR_PMPCFG0, PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << "static void set_cfg() {"; + stream << "\n"; + stream << indent_str; + stream << "#if 1"; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * set MSECCFG_RLB to avoid locked"; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " unsigned rlb_value = MSECCFG_RLB;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(rlb_value));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * Set pmp0cfg for M mode (M_MEM), and pmp1cfg for base of TOR."; + stream << "\n"; + stream << indent_str; + stream << " * Then use pmp2cfg for TEST_MEM. 
Both test code and data share PMP entrance."; + stream << "\n"; + stream << indent_str; + stream << " * Also use pmp3cfg for fixed U mode (U_MEM)."; + stream << "\n"; + stream << indent_str; + stream << " */"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr3, %0 \\n\" :: \"r\"(U_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr2, %0 \\n\" :: \"r\"(TEST_MEM_END >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr1, %0 \\n\" :: \"r\"((TEST_MEM_START) >> 2) : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr0, %0 \\n\" :: \"r\"((TEST_MEM_START >> 3) - 1) : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = (PMP_R | PMP_W | PMP_X | PMP_NAPOT);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr6, %0 \\n\" :: \"r\"(TEST_MEM_START >> 2) : \"memory\"); // for data"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr5, %0 \\n\" :: \"r\"(0x80010000 >> 2) : \"memory\"); // for code"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpaddr4, %0 \\n\" :: \"r\"(0x80000000 >> 2) : \"memory\"); // addr start"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg0 = PMP_OFF;"; + stream << "\n"; + stream << indent_str; + stream << " reg_t cfg1 = PMP_OFF | ((PMP_R | PMP_W | PMP_TOR) << 16) | ((PMP_X | PMP_TOR) << 8);"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " // need to set L bit for M mode before set MML"; + stream << "\n"; + stream << indent_str; + stream << "#if M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= PMP_L;"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " cfg1 |= ((PMP_L << 8) | (PMP_L << 16));"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 8; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << "#if !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + stream << "#if __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (cfg1 << 32);"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg1, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg1)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << "#endif // __riscv_xlen == 64"; + stream << "\n"; + stream << indent_str; + stream << "#endif // !M_MODE_RWX"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << 
"\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + stream << " // set proc->state.mseccfg, for MML/MMWP"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned seccfg_bits = MSECCFG_MML | MSECCFG_MMWP;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrs 0x747, %0 \\n\"::\"r\"(seccfg_bits));"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // after set MML, RW=01 is possible"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (PMP_R | PMP_W | PMP_X | PMP_TOR) << 24; // for U_MEM"; + stream << "\n"; + stream << indent_str; + stream << " cfg0 |= (("; + stream << pmp_r; + stream << " ? PMP_R : 0) // for TEST_MEM"; + stream << "\n"; + stream << indent_str; + stream << " | PMP_W"; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_x; + stream << " ? PMP_X : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | ("; + stream << pmp_l; + stream << " ? PMP_L : 0) "; + stream << "\n"; + stream << indent_str; + stream << " | PMP_TOR) << 16;"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"csrw pmpcfg0, %0 \\n\""; + stream << "\n"; + stream << indent_str; + stream << " :"; + stream << "\n"; + stream << indent_str; + stream << " : \"r\"(cfg0)"; + stream << "\n"; + stream << indent_str; + stream << " : \"memory\");"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " // currently dummy since tlb flushed when set_csr on mseccfg"; + stream << "\n"; + stream << indent_str; + stream << " asm volatile (\"fence.i \\n\");"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// from pmp_ok() side,W/R/X is similar"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((noinline))"; + stream << "\n"; + stream << indent_str; + stream << "static void try_access() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << " const unsigned long delta = (unsigned long)0x1020304005060708ULL;"; + stream << "\n"; + stream << indent_str; + stream << " *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " if (actual_r_fail == 0 && actual_w_fail == 0) {"; + stream << "\n"; + stream << indent_str; + stream << " if (*(long *)target_arr != (unsigned long)0x0807060504030201ULL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << " actual_r_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " actual_w_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " actual_x_fail = 1; // reset inside target_foo()"; + stream << "\n"; + stream << indent_str; + stream << " target_foo();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "}"; + 
stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// in case mml set, printf cannot be used in U mode"; + stream << "\n"; + stream << indent_str; + stream << "__attribute ((section(\".text_umode\")))"; + stream << "\n"; + stream << indent_str; + stream << "void try_access_umode() {"; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_RW"; + stream << "\n"; + stream << indent_str; + stream << " target_arr[0] += 1;"; + stream << "\n"; + stream << indent_str; + stream << "// const unsigned long delta = 0x1020304005060708UL;"; + stream << "\n"; + stream << indent_str; + stream << "// *(long *)target_arr += delta;"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "// if (*(long *)target_arr != 0x0807060504030201UL + delta + 1) {"; + stream << "\n"; + stream << indent_str; + stream << "// actual_rw_fail = 1;"; + stream << "\n"; + stream << indent_str; + stream << "// }"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "#if TEST_FETCH"; + stream << "\n"; + stream << indent_str; + stream << " target_foo_umode();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " /*"; + stream << "\n"; + stream << indent_str; + stream << " * switch to M mode by invoking a write access fault for special address."; + stream << "\n"; + stream << indent_str; + stream << " */ "; + stream << "\n"; + stream << indent_str; + stream << " volatile unsigned char * p = (unsigned char *)(FAKE_ADDRESS);"; + stream << "\n"; + stream << indent_str; + stream << " *p = 1;"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "static void checkTestResult() {"; + stream << "\n"; + stream << indent_str; + stream << " int ret = 0;"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_r_fail != actual_r_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 1;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_w_fail != actual_w_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 2;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + stream << " if (expected_x_fail != actual_x_fail) {"; + stream << "\n"; + stream << indent_str; + stream << " ret += 4;"; + stream << "\n"; + stream << indent_str; + stream << " }"; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + indent = 4; + stream << " "; + indent = 0; + stream << "\n"; + stream << indent_str; + stream << " exit(ret); "; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << "int main() {"; + stream << "\n"; + stream << indent_str; + stream << " // assert in M mode"; + stream << "\n"; + stream << indent_str; + stream << " set_cfg();"; + stream << "\n"; + stream << indent_str; + stream << "\n"; + stream << indent_str; + stream << " try_access();"; + stream << "\n"; + stream << indent_str; + stream << "#if "; + stream << enable_umode_test; + stream << 
"\n"; + stream << indent_str; + stream << " switch_mode_access(); // access in umode and report final result"; + stream << "\n"; + stream << indent_str; + stream << "#else"; + stream << "\n"; + stream << indent_str; + stream << " checkTestResult();"; + stream << "\n"; + stream << indent_str; + stream << "#endif"; + stream << "\n"; + stream << indent_str; + stream << " return 0; // assert 0"; + stream << "\n"; + stream << indent_str; + stream << "}"; + stream << "\n"; + stream << indent_str; + } +}; + +#endif // PMP_OK_SHARE_1_GEN_CLASS_H diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc new file mode 100644 index 00000000..acdb1fbe --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/Makefile.inc @@ -0,0 +1,30 @@ + +ifeq ($(shell uname),Linux) + export PLATFORM := Linux +else +ifeq ($(findstring CYGWIN,$(shell uname)),CYGWIN) + export PLATFORM := CygWin +else #MINGW + export PLATFORM := MinGW +endif +endif + +PWD := $(shell pwd) +gengen = ${PWD}/${dir}/tool/gengen-1.4.2/build/${PLATFORM}/bin/gengen + +default: + for skel in ${cc_skel_list}; do\ + $(gengen) -i $$skel.cc_skel --file-name $$skel.h --gen-name $$skel || exit 1; \ + done +gen: + -rm -f ../*.c + g++ -O2 ${cc_file}.cc -o a.out + ./a.out + find .. -name "*.c" | wc -l + +clean: $(OPT_CLEAN) + -rm -f ../*.c + -rm -f a.out + for skel in ${cc_skel_list}; do\ + rm -f $$skel.h || exit 1; \ + done diff --git a/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/gengen b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/gengen new file mode 100755 index 00000000..e5b4e95c Binary files /dev/null and b/vendor/riscv-isa-sim/tests/mseccfg/gengen_tool/gengen differ diff --git a/vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld b/vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld new file mode 100644 index 00000000..2b3556f6 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/mseccfg_test.ld @@ -0,0 +1,79 @@ +/*======================================================================*/ +/* pmp testlinker script */ +/*======================================================================*/ + +MEMORY +{ + /* + * default location + * NAPOT is used. Thus size must be 2^n. + * For easier handling, esp. when MML set, trap handler has separated stack at GP + * - Currently allocate 0x140000. Just avoid overlap with _end. + */ + M_MEM (AX) : ORIGIN = 0x100000, LENGTH = 1M + + RESERVED : ORIGIN = 0x000000, LENGTH = 1M + + TEST_MEM (AX) : ORIGIN = 0x200000, LENGTH = 256K + + U_MEM (AX) : ORIGIN = 0x240000, LENGTH = 64K +} + +OUTPUT_ARCH( "riscv" ) +ENTRY(_start) + +/*----------------------------------------------------------------------*/ +/* Sections */ +/*----------------------------------------------------------------------*/ + +SECTIONS +{ + . = 0x100000; + __global_pointer$ = 0x140000; + + .text : ALIGN(256) { + *(.text.init) + *(.text) *(.text.*) *(.device_code) . = ALIGN(256); . = 0x10000; } + PROVIDE (__TEXT_END = .); + + .tohost : { *(.tohost) } + .rodata : ALIGN(256) { *(.rodata) *(.rodata.*) } + .sdata : ALIGN(256) { + *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2) *(.srodata*) + *(.sdata .sdata.* .gnu.linkonce.s.*) + } + + /* data sections, mostly for host to deal with input and output data */ + .data : {LONG(0xdeadbeef) *(.data) } + + /* bss segment */ + .sbss : { + *(.sbss .sbss.* .gnu.linkonce.sb.*) + *(.scommon) + } + .bss : { *(.bss) } + + /* + * thread-local data segment. + * Copied to TCM at start by init_tls(). 
+ */ + .tdata : ALIGN(256) + { + _tdata_begin = .; + *(.tls_start) *(.tdata) *(.tdata.*) + _tdata_end = .; + } + .tbss : + { + *(.tbss) *(.tbss.*) + _tbss_end = .; + } + + /* End of uninitalized data segement, as the start for 128KB stack + TLS */ + _end = 0x180000; + + .umode : { *(.text_umode) *(.data_umode) } > U_MEM + + .test : { *(.text_test_foo) *(.data_test_arr) } > TEST_MEM +} + diff --git a/vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc b/vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc new file mode 100644 index 00000000..dcb42422 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/pmp_enhancement_sail_spike_unit_test.doc @@ -0,0 +1,3409 @@ +MIME-Version: 1.0 +Content-Type: multipart/related; boundary="----=_NextPart_01D7437B.526C0BD0" + +This document is a Single File Web Page, also known as a Web Archive file. If you are seeing this message, your browser or editor doesn't support Web Archive files. Please download a browser that supports Web Archive. + +------=_NextPart_01D7437B.526C0BD0 +Content-Location: file:///C:/2AEBA2D4/pmp+enhancement+-+spike,+sail+and+unit+test.htm +Content-Transfer-Encoding: base64 +Content-Type: text/html; charset="unicode" + +//48AGgAdABtAGwAIAB4AG0AbABuAHMAOgB2AD0AIgB1AHIAbgA6AHMAYwBoAGUAbQBhAHMALQBt +AGkAYwByAG8AcwBvAGYAdAAtAGMAbwBtADoAdgBtAGwAIgANAAoAeABtAGwAbgBzADoAbwA9ACIA +dQByAG4AOgBzAGMAaABlAG0AYQBzAC0AbQBpAGMAcgBvAHMAbwBmAHQALQBjAG8AbQA6AG8AZgBm +AGkAYwBlADoAbwBmAGYAaQBjAGUAIgANAAoAeABtAGwAbgBzADoAdwA9ACIAdQByAG4AOgBzAGMA +aABlAG0AYQBzAC0AbQBpAGMAcgBvAHMAbwBmAHQALQBjAG8AbQA6AG8AZgBmAGkAYwBlADoAdwBv +AHIAZAAiAA0ACgB4AG0AbABuAHMAOgBtAD0AIgBoAHQAdABwADoALwAvAHMAYwBoAGUAbQBhAHMA +LgBtAGkAYwByAG8AcwBvAGYAdAAuAGMAbwBtAC8AbwBmAGYAaQBjAGUALwAyADAAMAA0AC8AMQAy +AC8AbwBtAG0AbAAiAA0ACgB4AG0AbABuAHMAPQAiAGgAdAB0AHAAOgAvAC8AdwB3AHcALgB3ADMA +LgBvAHIAZwAvAFQAUgAvAFIARQBDAC0AaAB0AG0AbAA0ADAAIgA+AA0ACgANAAoAPABoAGUAYQBk +AD4ADQAKADwAbQBlAHQAYQAgAGgAdAB0AHAALQBlAHEAdQBpAHYAPQBDAG8AbgB0AGUAbgB0AC0A +VAB5AHAAZQAgAGMAbwBuAHQAZQBuAHQAPQAiAHQAZQB4AHQALwBoAHQAbQBsADsAIABjAGgAYQBy +AHMAZQB0AD0AdQBuAGkAYwBvAGQAZQAiAD4ADQAKADwAbQBlAHQAYQAgAG4AYQBtAGUAPQBQAHIA +bwBnAEkAZAAgAGMAbwBuAHQAZQBuAHQAPQBXAG8AcgBkAC4ARABvAGMAdQBtAGUAbgB0AD4ADQAK +ADwAbQBlAHQAYQAgAG4AYQBtAGUAPQBHAGUAbgBlAHIAYQB0AG8AcgAgAGMAbwBuAHQAZQBuAHQA +PQAiAE0AaQBjAHIAbwBzAG8AZgB0ACAAVwBvAHIAZAAgADEANQAiAD4ADQAKADwAbQBlAHQAYQAg +AG4AYQBtAGUAPQBPAHIAaQBnAGkAbgBhAHQAbwByACAAYwBvAG4AdABlAG4AdAA9ACIATQBpAGMA +cgBvAHMAbwBmAHQAIABXAG8AcgBkACAAMQA1ACIAPgANAAoAPABsAGkAbgBrACAAcgBlAGwAPQBG +AGkAbABlAC0ATABpAHMAdAANAAoAaAByAGUAZgA9ACIAcABtAHAAKwBlAG4AaABhAG4AYwBlAG0A +ZQBuAHQAKwAtACsAcwBwAGkAawBlACwAKwBzAGEAaQBsACsAYQBuAGQAKwB1AG4AaQB0ACsAdABl +AHMAdABfAGYAaQBsAGUAcwAvAGYAaQBsAGUAbABpAHMAdAAuAHgAbQBsACIAPgANAAoAPAB0AGkA +dABsAGUAPgBwAG0AcAAgAGUAbgBoAGEAbgBjAGUAbQBlAG4AdAAgAC0AIABzAHAAaQBrAGUALAAg +AHMAYQBpAGwAIABhAG4AZAAgAHUAbgBpAHQAIAB0AGUAcwB0ADwALwB0AGkAdABsAGUAPgANAAoA +PAAhAC0ALQBbAGkAZgAgAGcAdABlACAAbQBzAG8AIAA5AF0APgA8AHgAbQBsAD4ADQAKACAAPABv +ADoARABvAGMAdQBtAGUAbgB0AFAAcgBvAHAAZQByAHQAaQBlAHMAPgANAAoAIAAgADwAbwA6AEEA +dQB0AGgAbwByAD4AUwBvAGIAZQByACAATABpAHUAPAAvAG8AOgBBAHUAdABoAG8AcgA+AA0ACgAg +ACAAPABvADoATABhAHMAdABBAHUAdABoAG8AcgA+AFMAbwBiAGUAcgAgAEwAaQB1ADwALwBvADoA +TABhAHMAdABBAHUAdABoAG8AcgA+AA0ACgAgACAAPABvADoAUgBlAHYAaQBzAGkAbwBuAD4ANAA8 +AC8AbwA6AFIAZQB2AGkAcwBpAG8AbgA+AA0ACgAgACAAPABvADoAVABvAHQAYQBsAFQAaQBtAGUA +PgA3ADwALwBvADoAVABvAHQAYQBsAFQAaQBtAGUAPgANAAoAIAAgADwAbwA6AEMAcgBlAGEAdABl 
+IABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAgAE4AYQBtAGUAPQAi +AHQAbwBjACAAMQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAA +TABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgAzADkAIgAg +AFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgANAAoAIAAgACAAVQBuAGgAaQBkAGUA +VwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIAIABOAGEAbQBlAD0AIgB0AG8AYwAgADIAIgAv +AD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0A +IgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIAMwA5ACIAIABTAGUAbQBpAEgAaQBk +AGQAZQBuAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUA +ZAA9ACIAdAByAHUAZQAiACAATgBhAG0AZQA9ACIAdABvAGMAIAAzACIALwA+AA0ACgAgACAAPAB3 +ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIA +IABQAHIAaQBvAHIAaQB0AHkAPQAiADMAOQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdABy +AHUAZQAiAA0ACgAgACAAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUA +IgAgAE4AYQBtAGUAPQAiAHQAbwBjACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABj +AGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkA +dAB5AD0AIgAzADkAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgANAAoAIAAg +ACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIAIABOAGEAbQBlAD0A +IgB0AG8AYwAgADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIAMwA5ACIA +IABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAFUAbgBoAGkAZABl +AFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiACAATgBhAG0AZQA9ACIAdABvAGMAIAA2ACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADMAOQAiACAAUwBlAG0AaQBIAGkA +ZABkAGUAbgA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBl +AGQAPQAiAHQAcgB1AGUAIgAgAE4AYQBtAGUAPQAiAHQAbwBjACAANwAiAC8APgANAAoAIAAgADwA +dwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAi +ACAAUAByAGkAbwByAGkAdAB5AD0AIgAzADkAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQA +cgB1AGUAIgANAAoAIAAgACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBl +ACIAIABOAGEAbQBlAD0AIgB0AG8AYwAgADgAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBp +AHQAeQA9ACIAMwA5ACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIADQAKACAA +IAAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiACAATgBhAG0AZQA9 +ACIAdABvAGMAIAA5ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0 +AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoA +IAAgACAATgBhAG0AZQA9ACIATgBvAHIAbQBhAGwAIABJAG4AZABlAG4AdAAiAC8APgANAAoAIAAg +ADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMA +ZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBo +AGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAGYAbwBvAHQA +bgBvAHQAZQAgAHQAZQB4AHQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4A +PQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAi +AA0ACgAgACAAIABOAGEAbQBlAD0AIgBhAG4AbgBvAHQAYQB0AGkAbwBuACAAdABlAHgAdAAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi 
+AGgAZQBhAGQAZQByACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0 +AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoA +IAAgACAATgBhAG0AZQA9ACIAZgBvAG8AdABlAHIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABF +AHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkA +SABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9 +ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBpAG4AZABlAHgAIABoAGUAYQBkAGkA +bgBnACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADMANQAiACAAUwBlAG0A +aQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABVAG4AaABpAGQAZQBXAGgAZQBu +AFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAgAFEARgBvAHIAbQBhAHQAPQAiAHQAcgB1AGUAIgAgAE4A +YQBtAGUAPQAiAGMAYQBwAHQAaQBvAG4AIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQA +ZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdABy +AHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgB0AGEAYgBsAGUAIABvAGYAIABmAGkAZwB1AHIA +ZQBzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIA +IABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBh +AG0AZQA9ACIAZQBuAHYAZQBsAG8AcABlACAAYQBkAGQAcgBlAHMAcwAiAC8APgANAAoAIAAgADwA +dwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAi +ACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUA +bgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAGUAbgB2AGUAbABv +AHAAZQAgAHIAZQB0AHUAcgBuACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQA +aQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBu +AD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUA +IgANAAoAIAAgACAATgBhAG0AZQA9ACIAZgBvAG8AdABuAG8AdABlACAAcgBlAGYAZQByAGUAbgBj +AGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAg +AFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEA +bQBlAD0AIgBhAG4AbgBvAHQAYQB0AGkAbwBuACAAcgBlAGYAZQByAGUAbgBjAGUAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABl +AFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBsAGkA +bgBlACAAbgB1AG0AYgBlAHIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4A +PQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAi +AA0ACgAgACAAIABOAGEAbQBlAD0AIgBwAGEAZwBlACAAbgB1AG0AYgBlAHIAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcA +aABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBlAG4AZABu +AG8AdABlACAAcgBlAGYAZQByAGUAbgBjAGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABp +AGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIA +dAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBlAG4AZABuAG8AdABlACAAdABlAHgAdAAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA 
+PQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBu +AGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUA +PQAiAHQAYQBiAGwAZQAgAG8AZgAgAGEAdQB0AGgAbwByAGkAdABpAGUAcwAiAC8APgANAAoAIAAg +ADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMA +ZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBo +AGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAG0AYQBjAHIA +bwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBl +AGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAA +VQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBt +AGUAPQAiAHQAbwBhACAAaABlAGEAZABpAG4AZwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUA +eABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBI +AGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0A +IgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIgAvAD4ADQAKACAAIAA8 +AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUA +IgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABl +AG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAA +QgB1AGwAbABlAHQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQA +cgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAg +ACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAATgB1AG0AYgBlAHIAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4A +VQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAMgAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBu +AGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUA +PQAiAEwAaQBzAHQAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0A +IgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAN +AAoAIAAgACAATgBhAG0AZQA9ACIATABpAHMAdAAgADQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBt +AGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUA +ZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAANQAiAC8APgAN +AAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYA +YQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBk +AGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwA +aQBzAHQAIABCAHUAbABsAGUAdAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQA +ZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdABy +AHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAAQgB1AGwAbABlAHQAIAAzACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4A +aABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9 +ACIATABpAHMAdAAgAEIAdQBsAGwAZQB0ACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUA +eABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBI 
+AGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0A +IgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABCAHUAbABsAGUAdAAg +ADUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAg +AFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEA +bQBlAD0AIgBMAGkAcwB0ACAATgB1AG0AYgBlAHIAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUA +bQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBl +AGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIATABpAHMAdAAgAE4AdQBtAGIA +ZQByACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABv +AGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUA +ZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAg +AE4AYQBtAGUAPQAiAEwAaQBzAHQAIABOAHUAbQBiAGUAcgAgADQAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4A +VQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkAcwB0ACAATgB1 +AG0AYgBlAHIAIAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADEAMAAi +ACAAUQBGAG8AcgBtAGEAdAA9ACIAdAByAHUAZQAiACAATgBhAG0AZQA9ACIAVABpAHQAbABlACIA +LwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9 +ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4A +aABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9 +ACIAQwBsAG8AcwBpAG4AZwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9 +ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIA +DQAKACAAIAAgAE4AYQBtAGUAPQAiAFMAaQBnAG4AYQB0AHUAcgBlACIALwA+AA0ACgAgACAAPAB3 +ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIA +IABQAHIAaQBvAHIAaQB0AHkAPQAiADEAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1 +AGUAIgANAAoAIAAgACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIA +IABOAGEAbQBlAD0AIgBEAGUAZgBhAHUAbAB0ACAAUABhAHIAYQBnAHIAYQBwAGgAIABGAG8AbgB0 +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA +ZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABV +AG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0A +ZQA9ACIAQgBvAGQAeQAgAFQAZQB4AHQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQA +ZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdABy +AHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBCAG8AZAB5ACAAVABlAHgAdAAgAEkAbgBkAGUA +bgB0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIA +IABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBh +AG0AZQA9ACIATABpAHMAdAAgAEMAbwBuAHQAaQBuAHUAZQAiAC8APgANAAoAIAAgADwAdwA6AEwA +cwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBl +AG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMA +ZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABDAG8AbgB0 +AGkAbgB1AGUAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A 
+IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0 +AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoA +IAAgACAATgBhAG0AZQA9ACIATABpAHMAdAAgAEMAbwBuAHQAaQBuAHUAZQAgADMAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABl +AFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBMAGkA +cwB0ACAAQwBvAG4AdABpAG4AdQBlACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABj +AGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkA +ZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0 +AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEwAaQBzAHQAIABDAG8AbgB0AGkAbgB1AGUA +IAA1ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIA +IABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBh +AG0AZQA9ACIATQBlAHMAcwBhAGcAZQAgAEgAZQBhAGQAZQByACIALwA+AA0ACgAgACAAPAB3ADoA +TABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQ +AHIAaQBvAHIAaQB0AHkAPQAiADEAMQAiACAAUQBGAG8AcgBtAGEAdAA9ACIAdAByAHUAZQAiACAA +TgBhAG0AZQA9ACIAUwB1AGIAdABpAHQAbABlACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgA +aQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAi +AHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAUwBhAGwAdQB0AGEAdABpAG8AbgAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AEQAYQB0AGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1 +AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAA +IABOAGEAbQBlAD0AIgBCAG8AZAB5ACAAVABlAHgAdAAgAEYAaQByAHMAdAAgAEkAbgBkAGUAbgB0 +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA +ZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABV +AG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0A +ZQA9ACIAQgBvAGQAeQAgAFQAZQB4AHQAIABGAGkAcgBzAHQAIABJAG4AZABlAG4AdAAgADIAIgAv +AD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0A +IgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBo +AGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0A +IgBOAG8AdABlACAASABlAGEAZABpAG4AZwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABj +AGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkA +ZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0 +AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEIAbwBkAHkAIABUAGUAeAB0ACAAMgAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AEIAbwBkAHkAIABUAGUAeAB0ACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUA +cAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABk +AGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIA +dQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEIAbwBkAHkAIABUAGUAeAB0ACAASQBuAGQAZQBu 
+AHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8A +YwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBl +ACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAA +TgBhAG0AZQA9ACIAQgBvAGQAeQAgAFQAZQB4AHQAIABJAG4AZABlAG4AdAAgADMAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABl +AFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBCAGwA +bwBjAGsAIABUAGUAeAB0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0A +IgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAN +AAoAIAAgACAATgBhAG0AZQA9ACIASAB5AHAAZQByAGwAaQBuAGsAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4A +VQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBGAG8AbABsAG8AdwBl +AGQASAB5AHAAZQByAGwAaQBuAGsAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAA +dABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9 +ACIAMgAyACIAIABRAEYAbwByAG0AYQB0AD0AIgB0AHIAdQBlACIAIABOAGEAbQBlAD0AIgBTAHQA +cgBvAG4AZwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABv +AGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgAyADAAIgAgAFEA +RgBvAHIAbQBhAHQAPQAiAHQAcgB1AGUAIgAgAE4AYQBtAGUAPQAiAEUAbQBwAGgAYQBzAGkAcwAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBu +AGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUA +PQAiAEQAbwBjAHUAbQBlAG4AdAAgAE0AYQBwACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgA +aQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAi +AHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAUABsAGEAaQBuACAAVABlAHgAdAAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AEUALQBtAGEAaQBsACAAUwBpAGcAbgBhAHQAdQByAGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBt +AGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUA +ZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBIAFQATQBMACAAVABvAHAAIABv +AGYAIABGAG8AcgBtACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A +IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0 +AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoA +IAAgACAATgBhAG0AZQA9ACIASABUAE0ATAAgAEIAbwB0AHQAbwBtACAAbwBmACAARgBvAHIAbQAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBu +AGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUA +PQAiAE4AbwByAG0AYQBsACAAKABXAGUAYgApACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgA +aQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAi +AHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIASABUAE0ATAAgAEEAYwByAG8AbgB5AG0A 
+IgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBk +AD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUA +bgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBl +AD0AIgBIAFQATQBMACAAQQBkAGQAcgBlAHMAcwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUA +eABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBI +AGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0A +IgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEgAVABNAEwAIABDAGkAdABlACIALwA+ +AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIA +ZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABp +AGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIA +SABUAE0ATAAgAEMAbwBkAGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4A +PQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAi +AA0ACgAgACAAIABOAGEAbQBlAD0AIgBIAFQATQBMACAARABlAGYAaQBuAGkAdABpAG8AbgAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AEgAVABNAEwAIABLAGUAeQBiAG8AYQByAGQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABp +AGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIA +dAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBIAFQATQBMACAAUAByAGUAZgBvAHIAbQBh +AHQAdABlAGQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1 +AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAA +IABOAGEAbQBlAD0AIgBIAFQATQBMACAAUwBhAG0AcABsAGUAIgAvAD4ADQAKACAAIAA8AHcAOgBM +AHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMA +ZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBz +AGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBIAFQATQBMACAAVAB5AHAA +ZQB3AHIAaQB0AGUAcgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBu +ACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIA +dAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAK +ACAAIAAgAE4AYQBtAGUAPQAiAEgAVABNAEwAIABWAGEAcgBpAGEAYgBsAGUAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcA +aABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBOAG8AcgBt +AGEAbAAgAFQAYQBiAGwAZQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9 +ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIA +DQAKACAAIAAgAE4AYQBtAGUAPQAiAGEAbgBuAG8AdABhAHQAaQBvAG4AIABzAHUAYgBqAGUAYwB0 +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA +ZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABV +AG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0A +ZQA9ACIATgBvACAATABpAHMAdAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0 +AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUA +bgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBl 
+ACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAE8AdQB0AGwAaQBuAGUAIABMAGkAcwB0ACAAMQAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AE8AdQB0AGwAaQBuAGUAIABMAGkAcwB0ACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUA +eABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBI +AGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0A +IgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAE8AdQB0AGwAaQBuAGUAIABMAGkAcwB0 +ACAAMwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMA +awBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAi +ACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4A +YQBtAGUAPQAiAFQAYQBiAGwAZQAgAFMAaQBtAHAAbABlACAAMQAiAC8APgANAAoAIAAgADwAdwA6 +AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAA +UwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBV +AHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAFMA +aQBtAHAAbABlACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBu +ACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIA +dAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAK +ACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAFMAaQBtAHAAbABlACAAMwAiAC8APgANAAoA +IAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBs +AHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUA +VwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBi +AGwAZQAgAEMAbABhAHMAcwBpAGMAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMA +ZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBk +AGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQA +cgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAAQwBsAGEAcwBzAGkAYwAg +ADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAg +AFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEA +bQBlAD0AIgBUAGEAYgBsAGUAIABDAGwAYQBzAHMAaQBjACAAMwAiAC8APgANAAoAIAAgADwAdwA6 +AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAA +UwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBV +AHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAEMA +bABhAHMAcwBpAGMAIAA0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0A +IgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgAN +AAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAAQwBvAGwAbwByAGYAdQBsACAAMQAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AFQAYQBiAGwAZQAgAEMAbwBsAG8AcgBmAHUAbAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBt +AGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUA +ZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABDAG8AbABv +AHIAZgB1AGwAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4A 
+IABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0 +AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoA +IAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAAQwBvAGwAdQBtAG4AcwAgADEAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABl +AFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEA +YgBsAGUAIABDAG8AbAB1AG0AbgBzACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABj +AGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkA +ZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0 +AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAEMAbwBsAHUAbQBuAHMA +IAAzACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIA +IABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBh +AG0AZQA9ACIAVABhAGIAbABlACAAQwBvAGwAdQBtAG4AcwAgADQAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4A +VQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABD +AG8AbAB1AG0AbgBzACAANQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9 +ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIA +DQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAEcAcgBpAGQAIAAxACIALwA+AA0ACgAg +ACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwA +cwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBX +AGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIA +bABlACAARwByAGkAZAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4A +PQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAi +AA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABHAHIAaQBkACAAMwAiAC8APgANAAoA +IAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBs +AHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUA +VwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBi +AGwAZQAgAEcAcgBpAGQAIAA0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQA +aQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBu +AD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUA +IgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAARwByAGkAZAAgADUAIgAvAD4ADQAK +ACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEA +bABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABl +AFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEA +YgBsAGUAIABHAHIAaQBkACAANgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0 +AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUA +bgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBl +ACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAEcAcgBpAGQAIAA3ACIALwA+AA0A +CgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBh +AGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQA +ZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABh 
+AGIAbABlACAARwByAGkAZAAgADgAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAA +dABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABl +AG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUA +ZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABMAGkAcwB0ACAAMQAiAC8APgAN +AAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYA +YQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBk +AGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQA +YQBiAGwAZQAgAEwAaQBzAHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBw +AHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQA +ZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1 +AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAATABpAHMAdAAgADMAIgAvAD4A +DQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBm +AGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkA +ZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBU +AGEAYgBsAGUAIABMAGkAcwB0ACAANAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUA +cAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABk +AGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIA +dQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAEwAaQBzAHQAIAA1ACIALwA+ +AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIA +ZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABp +AGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIA +VABhAGIAbABlACAATABpAHMAdAAgADYAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBl +AHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQA +ZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdABy +AHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABMAGkAcwB0ACAANwAiAC8A +PgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAi +AGYAYQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgA +aQBkAGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAi +AFQAYQBiAGwAZQAgAEwAaQBzAHQAIAA4ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMA +ZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUAbQBpAEgAaQBk +AGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQA +cgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAAMwBEACAAZQBmAGYAZQBj +AHQAcwAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1 +AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAA +IABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIAAzAEQAIABlAGYAZgBlAGMAdABzACAAMgAiAC8APgAN +AAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYA +YQBsAHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAAVQBuAGgAaQBk +AGUAVwBoAGUAbgBVAHMAZQBkAD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAFQA +YQBiAGwAZQAgADMARAAgAGUAZgBmAGUAYwB0AHMAIAAzACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABTAGUA +bQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQAZQBXAGgAZQBuAFUAcwBl +AGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABhAGIAbABlACAAQwBvAG4A +dABlAG0AcABvAHIAYQByAHkAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABp +AG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4A 
+PQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAi +AA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABFAGwAZQBnAGEAbgB0ACIALwA+AA0A +CgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBh +AGwAcwBlACIAIABTAGUAbQBpAEgAaQBkAGQAZQBuAD0AIgB0AHIAdQBlACIAIABVAG4AaABpAGQA +ZQBXAGgAZQBuAFUAcwBlAGQAPQAiAHQAcgB1AGUAIgANAAoAIAAgACAATgBhAG0AZQA9ACIAVABh +AGIAbABlACAAUAByAG8AZgBlAHMAcwBpAG8AbgBhAGwAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBt +AGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUA +ZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABTAHUAYgB0 +AGwAZQAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1 +AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAA +IABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABTAHUAYgB0AGwAZQAgADIAIgAvAD4ADQAKACAAIAA8 +AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUA +IgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABl +AG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUA +IABXAGUAYgAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQA +cgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAg +ACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABXAGUAYgAgADIAIgAvAD4ADQAKACAAIAA8AHcA +OgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAg +AFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4A +VQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBUAGEAYgBsAGUAIABX +AGUAYgAgADMAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwA +bwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1 +AGUAIgAgAFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAA +IABOAGEAbQBlAD0AIgBCAGEAbABsAG8AbwBuACAAVABlAHgAdAAiAC8APgANAAoAIAAgADwAdwA6 +AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAA +UAByAGkAbwByAGkAdAB5AD0AIgAzADkAIgAgAE4AYQBtAGUAPQAiAFQAYQBiAGwAZQAgAEcAcgBp +AGQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkASABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAg +AFUAbgBoAGkAZABlAFcAaABlAG4AVQBzAGUAZAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEA +bQBlAD0AIgBUAGEAYgBsAGUAIABUAGgAZQBtAGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABF +AHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFMAZQBtAGkA +SABpAGQAZABlAG4APQAiAHQAcgB1AGUAIgAgAE4AYQBtAGUAPQAiAFAAbABhAGMAZQBoAG8AbABk +AGUAcgAgAFQAZQB4AHQAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8A +bgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIAMQAi +ACAAUQBGAG8AcgBtAGEAdAA9ACIAdAByAHUAZQAiACAATgBhAG0AZQA9ACIATgBvACAAUwBwAGEA +YwBpAG4AZwAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABv +AGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADAAIgAgAE4A +YQBtAGUAPQAiAEwAaQBnAGgAdAAgAFMAaABhAGQAaQBuAGcAIgAvAD4ADQAKACAAIAA8AHcAOgBM +AHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAA +cgBpAG8AcgBpAHQAeQA9ACIANgAxACIAIABOAGEAbQBlAD0AIgBMAGkAZwBoAHQAIABMAGkAcwB0 +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA +ZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADYAMgAiACAATgBhAG0AZQA9 
+ACIATABpAGcAaAB0ACAARwByAGkAZAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUA +cAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5 +AD0AIgA2ADMAIgAgAE4AYQBtAGUAPQAiAE0AZQBkAGkAdQBtACAAUwBoAGEAZABpAG4AZwAgADEA +IgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBk +AD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgA0ACIAIABOAGEAbQBlAD0A +IgBNAGUAZABpAHUAbQAgAFMAaABhAGQAaQBuAGcAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIA +aQBvAHIAaQB0AHkAPQAiADYANQAiACAATgBhAG0AZQA9ACIATQBlAGQAaQB1AG0AIABMAGkAcwB0 +ACAAMQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMA +awBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADYAIgAgAE4AYQBt +AGUAPQAiAE0AZQBkAGkAdQBtACAATABpAHMAdAAgADIAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMA +ZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBp +AG8AcgBpAHQAeQA9ACIANgA3ACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUAbQAgAEcAcgBpAGQA +IAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBr +AGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADYAOAAiACAATgBhAG0A +ZQA9ACIATQBlAGQAaQB1AG0AIABHAHIAaQBkACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBk +AEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkA +bwByAGkAdAB5AD0AIgA2ADkAIgAgAE4AYQBtAGUAPQAiAE0AZQBkAGkAdQBtACAARwByAGkAZAAg +ADMAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsA +ZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANwAwACIAIABOAGEAbQBl +AD0AIgBEAGEAcgBrACAATABpAHMAdAAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUA +cAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5 +AD0AIgA3ADEAIgAgAE4AYQBtAGUAPQAiAEMAbwBsAG8AcgBmAHUAbAAgAFMAaABhAGQAaQBuAGcA +IgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBk +AD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANwAyACIAIABOAGEAbQBlAD0A +IgBDAG8AbABvAHIAZgB1AGwAIABMAGkAcwB0ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIA +aQB0AHkAPQAiADcAMwAiACAATgBhAG0AZQA9ACIAQwBvAGwAbwByAGYAdQBsACAARwByAGkAZAAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADAAIgAgAE4AYQBtAGUAPQAi +AEwAaQBnAGgAdAAgAFMAaABhAGQAaQBuAGcAIABBAGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgAxACIAIABOAGEAbQBlAD0AIgBMAGkAZwBoAHQA +IABMAGkAcwB0ACAAQQBjAGMAZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4 +AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIA +aQB0AHkAPQAiADYAMgAiACAATgBhAG0AZQA9ACIATABpAGcAaAB0ACAARwByAGkAZAAgAEEAYwBj +AGUAbgB0ACAAMQAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAA +TABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADMAIgAg +AE4AYQBtAGUAPQAiAE0AZQBkAGkAdQBtACAAUwBoAGEAZABpAG4AZwAgADEAIABBAGMAYwBlAG4A +dAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBj +AGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgA0ACIAIABOAGEA +bQBlAD0AIgBNAGUAZABpAHUAbQAgAFMAaABhAGQAaQBuAGcAIAAyACAAQQBjAGMAZQBuAHQAIAAx +ACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUA +ZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADYANQAiACAATgBhAG0AZQA9 +ACIATQBlAGQAaQB1AG0AIABMAGkAcwB0ACAAMQAgAEEAYwBjAGUAbgB0ACAAMQAiAC8APgANAAoA 
+IAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBs +AHMAZQAiACAAUwBlAG0AaQBIAGkAZABkAGUAbgA9ACIAdAByAHUAZQAiACAATgBhAG0AZQA9ACIA +UgBlAHYAaQBzAGkAbwBuACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBv +AG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADMA +NAAiACAAUQBGAG8AcgBtAGEAdAA9ACIAdAByAHUAZQAiAA0ACgAgACAAIABOAGEAbQBlAD0AIgBM +AGkAcwB0ACAAUABhAHIAYQBnAHIAYQBwAGgAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgA +YwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBp +AHQAeQA9ACIAMgA5ACIAIABRAEYAbwByAG0AYQB0AD0AIgB0AHIAdQBlACIAIABOAGEAbQBlAD0A +IgBRAHUAbwB0AGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAg +AEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIAMwAwACIA +IABRAEYAbwByAG0AYQB0AD0AIgB0AHIAdQBlACIADQAKACAAIAAgAE4AYQBtAGUAPQAiAEkAbgB0 +AGUAbgBzAGUAIABRAHUAbwB0AGUAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAA +dABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9 +ACIANgA2ACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUAbQAgAEwAaQBzAHQAIAAyACAAQQBjAGMA +ZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABM +AG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADYANwAiACAA +TgBhAG0AZQA9ACIATQBlAGQAaQB1AG0AIABHAHIAaQBkACAAMQAgAEEAYwBjAGUAbgB0ACAAMQAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2ADgAIgAgAE4AYQBtAGUAPQAi +AE0AZQBkAGkAdQBtACAARwByAGkAZAAgADIAIABBAGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgA5ACIAIABOAGEAbQBlAD0AIgBNAGUAZABpAHUA +bQAgAEcAcgBpAGQAIAAzACAAQQBjAGMAZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIA +aQBvAHIAaQB0AHkAPQAiADcAMAAiACAATgBhAG0AZQA9ACIARABhAHIAawAgAEwAaQBzAHQAIABB +AGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8A +bgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANwAx +ACIAIABOAGEAbQBlAD0AIgBDAG8AbABvAHIAZgB1AGwAIABTAGgAYQBkAGkAbgBnACAAQQBjAGMA +ZQBuAHQAIAAxACIALwA+AA0ACgAgACAAPAB3ADoATABzAGQARQB4AGMAZQBwAHQAaQBvAG4AIABM +AG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIAaQBvAHIAaQB0AHkAPQAiADcAMgAiACAA +TgBhAG0AZQA9ACIAQwBvAGwAbwByAGYAdQBsACAATABpAHMAdAAgAEEAYwBjAGUAbgB0ACAAMQAi +AC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkAbwBuACAATABvAGMAawBlAGQA +PQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA3ADMAIgAgAE4AYQBtAGUAPQAi +AEMAbwBsAG8AcgBmAHUAbAAgAEcAcgBpAGQAIABBAGMAYwBlAG4AdAAgADEAIgAvAD4ADQAKACAA +IAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBkAD0AIgBmAGEAbABz +AGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgAwACIAIABOAGEAbQBlAD0AIgBMAGkAZwBoAHQA +IABTAGgAYQBkAGkAbgBnACAAQQBjAGMAZQBuAHQAIAAyACIALwA+AA0ACgAgACAAPAB3ADoATABz +AGQARQB4AGMAZQBwAHQAaQBvAG4AIABMAG8AYwBrAGUAZAA9ACIAZgBhAGwAcwBlACIAIABQAHIA +aQBvAHIAaQB0AHkAPQAiADYAMQAiACAATgBhAG0AZQA9ACIATABpAGcAaAB0ACAATABpAHMAdAAg +AEEAYwBjAGUAbgB0ACAAMgAiAC8APgANAAoAIAAgADwAdwA6AEwAcwBkAEUAeABjAGUAcAB0AGkA +bwBuACAATABvAGMAawBlAGQAPQAiAGYAYQBsAHMAZQAiACAAUAByAGkAbwByAGkAdAB5AD0AIgA2 +ADIAIgAgAE4AYQBtAGUAPQAiAEwAaQBnAGgAdAAgAEcAcgBpAGQAIABBAGMAYwBlAG4AdAAgADIA +IgAvAD4ADQAKACAAIAA8AHcAOgBMAHMAZABFAHgAYwBlAHAAdABpAG8AbgAgAEwAbwBjAGsAZQBk +AD0AIgBmAGEAbABzAGUAIgAgAFAAcgBpAG8AcgBpAHQAeQA9ACIANgAzACIAIABOAGEAbQBlAD0A +IgBNAGUAZABpAHUAbQAgAFMAaABhAGQAaQBuAGcAIAAxACAAQQBjAGMAZQBuAHQAIAAyACIALwA+ 
+dAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6 +AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAwADoAbABlAHYAZQBsADcA +DQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBi +AHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoA +CQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIANQAyAC4AMABwAHQAOwAN +AAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoA +bABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwAN +AAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsA +DQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBA +AGwAaQBzAHQAIABsADAAOgBsAGUAdgBlAGwAOAANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIA +LQBzAHQAbwBwADoAMgA4ADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1 +AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkA +bgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBu +AHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkA +OgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMAA6AGwAZQB2AGUAbAA5 +AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoA +YgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADIANAAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6 +AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7 +AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoA +QABsAGkAcwB0ACAAbAAxAA0ACgAJAHsAbQBzAG8ALQBsAGkAcwB0AC0AaQBkADoAMQA2ADAAMwAx +ADgAMAAxADYAOwANAAoACQBtAHMAbwAtAGwAaQBzAHQALQB0AGUAbQBwAGwAYQB0AGUALQBpAGQA +cwA6AC0AMQA3ADEAMQA2ADMAMQA1ADMANAA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMQA6AGwAZQB2 +AGUAbAAxAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0A +YQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn +8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADYALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBv +AG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABw +AHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0A +DQAKAEAAbABpAHMAdAAgAGwAMQA6AGwAZQB2AGUAbAAyAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0 +AGEAYgAtAHMAdABvAHAAOgA3ADIALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAt +AGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYA +bwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBs +AHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMQA6AGwAZQB2AGUA +bAAzAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0 +ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsA +DQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAxADAAOAAuADAAcAB0 
+ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8A +bgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0 +ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAA +dAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQAN +AAoAQABsAGkAcwB0ACAAbAAxADoAbABlAHYAZQBsADQADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUA +bAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQA +YQBiAC0AcwB0AG8AcAA6ADEANAA0AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAt +AG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQA +LQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBm +AG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkA +bAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADEAOgBsAGUAdgBl +AGwANQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEA +dAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7 +AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQA4ADAALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBv +AG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABw +AHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0A +DQAKAEAAbABpAHMAdAAgAGwAMQA6AGwAZQB2AGUAbAA2AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0 +AGEAYgAtAHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwA +LQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0 +AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0A +ZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBp +AGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAxADoAbABlAHYA +ZQBsADcADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBh +AHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfw +OwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIANQAyAC4AMABw +AHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkA +bwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABw +AHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAA +cAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9 +AA0ACgBAAGwAaQBzAHQAIABsADEAOgBsAGUAdgBlAGwAOAANAAoACQB7AG0AcwBvAC0AbABlAHYA +ZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBz +AG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0A +dABhAGIALQBzAHQAbwBwADoAMgA4ADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBs +AC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgA +dAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAt +AGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0A +aQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMQA6AGwAZQB2 +AGUAbAA5AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0A +YQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn +8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADIANAAuADAA 
+cAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABp +AG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAA +cAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAw +AHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsA +fQANAAoAQABsAGkAcwB0ACAAbAAyAA0ACgAJAHsAbQBzAG8ALQBsAGkAcwB0AC0AaQBkADoAMgAw +ADMANQA1ADkANwAxADQAOwANAAoACQBtAHMAbwAtAGwAaQBzAHQALQB0AGUAbQBwAGwAYQB0AGUA +LQBpAGQAcwA6ADEANwA3ADQANwA1ADIAOQAyADYAOwB9AA0ACgBAAGwAaQBzAHQAIABsADIAOgBs +AGUAdgBlAGwAMQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8A +cgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0 +ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMwA2AC4A +MABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0 +AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4A +MABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAu +ADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMA +OwB9AA0ACgBAAGwAaQBzAHQAIABsADIAOgBsAGUAdgBlAGwAMgANAAoACQB7AG0AcwBvAC0AbABl +AHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkA +bQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBs +AC0AdABhAGIALQBzAHQAbwBwADoANwAyAC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4 +AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkA +LQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBt +AGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADIAOgBsAGUA +dgBlAGwAMwANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBt +AGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoA +p/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQAwADgALgAw +AHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQA +aQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAw +AHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4A +MABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7 +AH0ADQAKAEAAbABpAHMAdAAgAGwAMgA6AGwAZQB2AGUAbAA0AA0ACgAJAHsAbQBzAG8ALQBsAGUA +dgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBt +AHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwA +LQB0AGEAYgAtAHMAdABvAHAAOgAxADQANAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUA +eAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBp +AC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEA +bQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAyADoAbABl +AHYAZQBsADUADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIA +bQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6 +AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADEAOAAwAC4A +MABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0 +AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4A +MABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAu +ADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMA +OwB9AA0ACgBAAGwAaQBzAHQAIABsADIAOgBsAGUAdgBlAGwANgANAAoACQB7AG0AcwBvAC0AbABl 
+AHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkA +bQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBs +AC0AdABhAGIALQBzAHQAbwBwADoAMgAxADYALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYA +ZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABl +AHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMA +aQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBh +AG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMgA6AGwA +ZQB2AGUAbAA3AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwBy +AG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQA +OgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAyADUAMgAu +ADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkA +dABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAu +ADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAA +LgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBz +ADsAfQANAAoAQABsAGkAcwB0ACAAbAAyADoAbABlAHYAZQBsADgADQAKAAkAewBtAHMAbwAtAGwA +ZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJ +AG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIAOAA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2 +AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQA +ZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBz +AGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYA +YQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADIAOgBs +AGUAdgBlAGwAOQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8A +cgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0 +ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMwAyADQA +LgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBp +AHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgA +LgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAw +AC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcA +cwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMwANAAoACQB7AG0AcwBvAC0AbABpAHMAdAAtAGkAZAA6 +ADIANQA4ADAAMgAzADMAMgA0ADsADQAKAAkAbQBzAG8ALQBsAGkAcwB0AC0AdABlAG0AcABsAGEA +dABlAC0AaQBkAHMAOgAtADgANAA1ADUAMgA3ADgAOQA0ADsAfQANAAoAQABsAGkAcwB0ACAAbAAz +ADoAbABlAHYAZQBsADEADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0A +ZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABl +AHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADMA +NgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBz +AGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEA +OAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAx +ADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4A +ZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAzADoAbABlAHYAZQBsADIADQAKAAkAewBtAHMAbwAt +AGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0A +CgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2 +AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADcAMgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUA +dgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0 +AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4A 
+cwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBm +AGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAzADoA +bABlAHYAZQBsADMADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBv +AHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgA +dAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADEAMAA4 +AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMA +aQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4 +AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEA +MAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBn +AHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADMAOgBsAGUAdgBlAGwANAANAAoACQB7AG0AcwBvAC0A +bABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYA +ZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQA0ADQALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABl +AHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkA +dABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBu +AHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0A +ZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMwA6 +AGwAZQB2AGUAbAA1AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYA +bwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4 +AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAxADgA +MAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBz +AGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEA +OAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAx +ADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4A +ZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAzADoAbABlAHYAZQBsADYADQAKAAkAewBtAHMAbwAt +AGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0A +CgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2 +AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIAMQA2AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwA +ZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJ +AHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEA +bgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAt +AGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADMA +OgBsAGUAdgBlAGwANwANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBm +AG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUA +eAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMgA1 +ADIALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8A +cwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAx +ADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoA +MQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBu +AGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAMwA6AGwAZQB2AGUAbAA4AA0ACgAJAHsAbQBzAG8A +LQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwAN +AAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUA +dgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAyADgAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoA +CQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBh 
+AG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQA +LQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAAz +ADoAbABlAHYAZQBsADkADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0A +ZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABl +AHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADMA +MgA0AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABv +AHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0A +MQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6 +ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkA +bgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADQADQAKAAkAewBtAHMAbwAtAGwAaQBzAHQALQBp +AGQAOgAzADcAMAA0ADIAMwA0ADAAMAA7AA0ACgAJAG0AcwBvAC0AbABpAHMAdAAtAHQAZQBtAHAA +bABhAHQAZQAtAGkAZABzADoALQA4ADkANwA0ADIANwA2ADEANAA7AH0ADQAKAEAAbABpAHMAdAAg +AGwANAA6AGwAZQB2AGUAbAAxAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA +cgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAt +AHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAA +OgAzADYALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBw +AG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoA +LQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBl +ADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQA +aQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANAA6AGwAZQB2AGUAbAAyAA0ACgAJAHsAbQBz +AG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQA +OwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgA3ADIALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0A +bABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAK +AAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0A +YQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0 +AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwA +NAA6AGwAZQB2AGUAbAAzAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAt +AGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQA +ZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAx +ADAAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAA +bwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAt +ADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUA +OgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABp +AG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA0ADoAbABlAHYAZQBsADQADQAKAAkAewBtAHMA +bwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7 +AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwA +ZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADEANAA0AC4AMABwAHQAOwANAAoACQBtAHMAbwAt +AGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0A +CgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAt +AGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4A +dAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABs +ADQAOgBsAGUAdgBlAGwANQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIA +LQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0 +AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoA 
+MQA4ADAALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBw +AG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoA +LQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBl +ADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQA +aQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANAA6AGwAZQB2AGUAbAA2AA0ACgAJAHsAbQBz +AG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQA +OwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsADQAKAAkAbQBzAG8A +LQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwAN +AAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8A +LQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBu +AHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAA +bAA0ADoAbABlAHYAZQBsADcADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQBy +AC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0A +dABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6 +ADIANQAyAC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0A +cABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6 +AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoA +ZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBk +AGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADQAOgBsAGUAdgBlAGwAOAANAAoACQB7AG0A +cwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0 +ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0A +bABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMgA4ADgALgAwAHAAdAA7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsA +DQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBv +AC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8A +bgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAg +AGwANAA6AGwAZQB2AGUAbAA5AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA +cgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAt +AHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAA +OgAzADIANAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAt +AHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQA +OgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6 +AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcA +ZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA1AA0ACgAJAHsAbQBzAG8ALQBsAGkAcwB0 +AC0AaQBkADoANAAxADUANQAxADkANwA5ADUAOwANAAoACQBtAHMAbwAtAGwAaQBzAHQALQB0AGUA +bQBwAGwAYQB0AGUALQBpAGQAcwA6AC0ANAAzADEAMwA0ADMAOAA5ADIAOwB9AA0ACgBAAGwAaQBz +AHQAIABsADUAOgBsAGUAdgBlAGwAMQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0A +YgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBl +AGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQA +bwBwADoAMwA2AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQBy +AC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4A +dAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBp +AHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4A +ZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADUAOgBsAGUAdgBlAGwAMgANAAoACQB7 
+AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwA +ZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoANwAyAC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7 +AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBv +AG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQA +IABsADUAOgBsAGUAdgBlAGwAMwANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBl +AHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwA +LQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBw +ADoAMQAwADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIA +LQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0 +ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkA +egBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBn +AGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANQA6AGwAZQB2AGUAbAA0AA0ACgAJAHsA +bQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABl +AHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8A +LQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAxADQANAAuADAAcAB0ADsADQAKAAkAbQBz +AG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQA +OwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBz +AG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYA +bwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0 +ACAAbAA1ADoAbABlAHYAZQBsADUADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIA +ZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBs +AC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8A +cAA6ADEAOAAwAC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQBy +AC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4A +dAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBp +AHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4A +ZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADUAOgBsAGUAdgBlAGwANgANAAoACQB7 +AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwA +ZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMgAxADYALgAwAHAAdAA7AA0ACgAJAG0A +cwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0 +ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0A +cwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBm +AG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMA +dAAgAGwANQA6AGwAZQB2AGUAbAA3AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBi +AGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABv +AHAAOgAyADUAMgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA +cgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBu +AHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMA +aQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBu +AGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA1ADoAbABlAHYAZQBsADgADQAKAAkA 
+ewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABs +AGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIAOAA4AC4AMABwAHQAOwANAAoACQBt +AHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYA +dAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBt +AHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkA +ZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBz +AHQAIABsADUAOgBsAGUAdgBlAGwAOQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0A +YgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBl +AGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQA +bwBwADoAMwAyADQALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBl +AHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUA +bgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBz +AGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkA +bgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANgANAAoACQB7AG0AcwBvAC0AbABp +AHMAdAAtAGkAZAA6ADQAOQAxADgAMAAwADEAMQA2ADsADQAKAAkAbQBzAG8ALQBsAGkAcwB0AC0A +dABlAG0AcABsAGEAdABlAC0AaQBkAHMAOgAtADMAMgA3ADIAMAA1ADAANAA7AH0ADQAKAEAAbABp +AHMAdAAgAGwANgA6AGwAZQB2AGUAbAAxAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUA +bQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2 +AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMA +dABvAHAAOgAzADYALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBl +AHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUA +bgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBz +AGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkA +bgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANgA6AGwAZQB2AGUAbAAyAA0ACgAJ +AHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwA +bABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBz +AG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgA3ADIALgAwAHAAdAA7AA0ACgAJAG0A +cwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0 +ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0A +cwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBm +AG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMA +dAAgAGwANgA6AGwAZQB2AGUAbAAzAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBi +AGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUA +bAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABv +AHAAOgAxADAAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUA +cgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBu +AHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMA +aQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBu +AGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA2ADoAbABlAHYAZQBsADQADQAKAAkA +ewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABs +AGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADEANAA0AC4AMABwAHQAOwANAAoACQBt +AHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYA +dAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBt 
+AHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkA +ZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBz +AHQAIABsADYAOgBsAGUAdgBlAGwANQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0A +YgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBl +AGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQA +bwBwADoAMQA4ADAALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBl +AHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUA +bgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBz +AGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkA +bgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANgA6AGwAZQB2AGUAbAA2AA0ACgAJ +AHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwA +bABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBz +AG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsADQAKAAkA +bQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBm +AHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkA +bQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJ +AGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkA +cwB0ACAAbAA2ADoAbABlAHYAZQBsADcADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBt +AGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYA +ZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0 +AG8AcAA6ADIANQAyAC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIA +ZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABl +AG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0A +cwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBp +AG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADYAOgBsAGUAdgBlAGwAOAANAAoA +CQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBs +AGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0A +cwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMgA4ADgALgAwAHAAdAA7AA0ACgAJ +AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUA +ZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJ +AG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoA +CQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABp +AHMAdAAgAGwANgA6AGwAZQB2AGUAbAA5AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUA +bQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2 +AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMA +dABvAHAAOgAzADIANAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBi +AGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQA +ZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAt +AHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcA +aQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA3AA0ACgAJAHsAbQBzAG8ALQBs +AGkAcwB0AC0AaQBkADoAOAAwADQAMQA1ADMAMgA1ADEAOwANAAoACQBtAHMAbwAtAGwAaQBzAHQA +LQB0AGUAbQBwAGwAYQB0AGUALQBpAGQAcwA6ADEAMAAzADEAMQA1ADYAMwA5ADYAOwB9AA0ACgBA +AGwAaQBzAHQAIABsADcAOgBsAGUAdgBlAGwAMQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQB0AGUAeAB0ADoAt/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIA 
+LQBzAHQAbwBwADoAMwA2AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBt +AGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4A +ZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0 +AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoA +UwB5AG0AYgBvAGwAOwB9AA0ACgBAAGwAaQBzAHQAIABsADcAOgBsAGUAdgBlAGwAMgANAAoACQB7 +AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwA +ZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAbwA7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoANwAyAC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7 +AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMA +bwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBv +AG4AdAAtAGYAYQBtAGkAbAB5ADoAIgBDAG8AdQByAGkAZQByACAATgBlAHcAIgA7AA0ACgAJAG0A +cwBvAC0AYgBpAGQAaQAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6ACIAVABpAG0AZQBzACAATgBl +AHcAIABSAG8AbQBhAG4AIgA7AH0ADQAKAEAAbABpAHMAdAAgAGwANwA6AGwAZQB2AGUAbAAzAA0A +CgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1 +AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkA +bQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAxADAAOAAuADAAcAB0ADsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwA +ZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAK +AAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0A +CgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABs +AGkAcwB0ACAAbAA3ADoAbABlAHYAZQBsADQADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4A +dQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABl +AHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0A +cwB0AG8AcAA6ADEANAA0AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBt +AGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4A +ZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0 +AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoA +VwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADcAOgBsAGUAdgBlAGwANQAN +AAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIA +dQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJ +AG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQA4ADAALgAwAHAAdAA7AA0A +CgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBs +AGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0A +CgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwAN +AAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAA +bABpAHMAdAAgAGwANwA6AGwAZQB2AGUAbAA2AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBu +AHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwA +ZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAt +AHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUA +bQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBu +AGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4A +dAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6 +AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA3ADoAbABlAHYAZQBsADcA +DQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBi 
+AHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoA +CQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIANQAyAC4AMABwAHQAOwAN +AAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoA +bABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwAN +AAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsA +DQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBA +AGwAaQBzAHQAIABsADcAOgBsAGUAdgBlAGwAOAANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBs +AGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIA +LQBzAHQAbwBwADoAMgA4ADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1 +AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkA +bgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBu +AHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkA +OgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwANwA6AGwAZQB2AGUAbAA5 +AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoA +YgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAK +AAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADIANAAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6 +AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsA +DQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7 +AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoA +QABsAGkAcwB0ACAAbAA4AA0ACgAJAHsAbQBzAG8ALQBsAGkAcwB0AC0AaQBkADoAOQA2ADQAOAA1 +ADAAMQA2ADcAOwANAAoACQBtAHMAbwAtAGwAaQBzAHQALQB0AGUAbQBwAGwAYQB0AGUALQBpAGQA +cwA6AC0AMQAwADMAMgA3ADEAMAAxADMAMgA7AH0ADQAKAEAAbABpAHMAdAAgAGwAOAA6AGwAZQB2 +AGUAbAAxAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0A +YQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn +8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADYALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBv +AG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABw +AHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0A +DQAKAEAAbABpAHMAdAAgAGwAOAA6AGwAZQB2AGUAbAAyAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0 +AGEAYgAtAHMAdABvAHAAOgA3ADIALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0A +bgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAt +AGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYA +bwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBs +AHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAOAA6AGwAZQB2AGUA +bAAzAA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0 +ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsA +DQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAxADAAOAAuADAAcAB0 +ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8A +bgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0 +ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAA 
+dAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQAN +AAoAQABsAGkAcwB0ACAAbAA4ADoAbABlAHYAZQBsADQADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUA +bAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBhAHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBv +AC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfwOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQA +YQBiAC0AcwB0AG8AcAA6ADEANAA0AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAt +AG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkAbwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQA +LQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABwAHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBm +AG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAAcAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkA +bAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9AA0ACgBAAGwAaQBzAHQAIABsADgAOgBsAGUAdgBl +AGwANQANAAoACQB7AG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEA +dAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7 +AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABhAGIALQBzAHQAbwBwADoAMQA4ADAALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBv +AG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgAdAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAA +dAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAtAGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABw +AHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0A +DQAKAEAAbABpAHMAdAAgAGwAOAA6AGwAZQB2AGUAbAA2AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBl +AGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0AYQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMA +bwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0 +AGEAYgAtAHMAdABvAHAAOgAyADEANgAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwA +LQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABpAG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0 +AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAAcAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0A +ZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAwAHAAdAA7AA0ACgAJAGYAbwBuAHQALQBmAGEAbQBp +AGwAeQA6AFcAaQBuAGcAZABpAG4AZwBzADsAfQANAAoAQABsAGkAcwB0ACAAbAA4ADoAbABlAHYA +ZQBsADcADQAKAAkAewBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AZgBvAHIAbQBh +AHQAOgBiAHUAbABsAGUAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0AdABlAHgAdAA6AKfw +OwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAYQBiAC0AcwB0AG8AcAA6ADIANQAyAC4AMABw +AHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAG4AdQBtAGIAZQByAC0AcABvAHMAaQB0AGkA +bwBuADoAbABlAGYAdAA7AA0ACgAJAHQAZQB4AHQALQBpAG4AZABlAG4AdAA6AC0AMQA4AC4AMABw +AHQAOwANAAoACQBtAHMAbwAtAGEAbgBzAGkALQBmAG8AbgB0AC0AcwBpAHoAZQA6ADEAMAAuADAA +cAB0ADsADQAKAAkAZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoAVwBpAG4AZwBkAGkAbgBnAHMAOwB9 +AA0ACgBAAGwAaQBzAHQAIABsADgAOgBsAGUAdgBlAGwAOAANAAoACQB7AG0AcwBvAC0AbABlAHYA +ZQBsAC0AbgB1AG0AYgBlAHIALQBmAG8AcgBtAGEAdAA6AGIAdQBsAGwAZQB0ADsADQAKAAkAbQBz +AG8ALQBsAGUAdgBlAGwALQB0AGUAeAB0ADoAp/A7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBsAC0A +dABhAGIALQBzAHQAbwBwADoAMgA4ADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AbABlAHYAZQBs +AC0AbgB1AG0AYgBlAHIALQBwAG8AcwBpAHQAaQBvAG4AOgBsAGUAZgB0ADsADQAKAAkAdABlAHgA +dAAtAGkAbgBkAGUAbgB0ADoALQAxADgALgAwAHAAdAA7AA0ACgAJAG0AcwBvAC0AYQBuAHMAaQAt +AGYAbwBuAHQALQBzAGkAegBlADoAMQAwAC4AMABwAHQAOwANAAoACQBmAG8AbgB0AC0AZgBhAG0A +aQBsAHkAOgBXAGkAbgBnAGQAaQBuAGcAcwA7AH0ADQAKAEAAbABpAHMAdAAgAGwAOAA6AGwAZQB2 +AGUAbAA5AA0ACgAJAHsAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAGYAbwByAG0A +YQB0ADoAYgB1AGwAbABlAHQAOwANAAoACQBtAHMAbwAtAGwAZQB2AGUAbAAtAHQAZQB4AHQAOgCn +8DsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQB0AGEAYgAtAHMAdABvAHAAOgAzADIANAAuADAA +cAB0ADsADQAKAAkAbQBzAG8ALQBsAGUAdgBlAGwALQBuAHUAbQBiAGUAcgAtAHAAbwBzAGkAdABp +AG8AbgA6AGwAZQBmAHQAOwANAAoACQB0AGUAeAB0AC0AaQBuAGQAZQBuAHQAOgAtADEAOAAuADAA +cAB0ADsADQAKAAkAbQBzAG8ALQBhAG4AcwBpAC0AZgBvAG4AdAAtAHMAaQB6AGUAOgAxADAALgAw 
+# pmp enhancement - spike, sail and unit test
+
+- 1. Spike changes
+- 2. Sail-riscv
+- 3. Unit test
+  - 3.1. Test plan
+    - 3.1.1. CSR access
+    - 3.1.2. Memory access
+      - 3.1.2.1. Non-share mode
+      - 3.1.2.2. Share mode
+    - 3.1.3. Test range
+  - 3.2. Test implementation
+
+## 1. Spike changes
+
+Changes have been submitted to https://github.com/joxie/riscv-isa-sim.git
+
+## 2. Sail-riscv
+
+Changes have been submitted to https://github.com/joxie/sail-riscv/commits/master/
+
+Known issues: see https://github.com/rems-project/sail-riscv/issues
+
+## 3. Unit test
+
+### 3.1. Test plan
+
+The tests can be divided into 2 groups:
+
+- CSR access on pmpaddr, pmpcfg and mseccfg.
+- Based on the specified CSR config, memory access via PMP.
+
+To keep the tests simple, all test cases are performed in the following steps:
+
+- Step 1: set up one of the configurations
+- Step 2: try to access the CSR(s) or memories, and record errors if any
+- Step 3: verify that the results are as expected
+
+The combinations of configurations and access types will be covered. For step 1,
+this includes both explicit values (like CSR values) and implicit state (like
+whether the PMP lock was set). In this way we can avoid creating many separate
+test sequences.
+
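+As an illustration, a minimal sketch of this three-step flow is shown below,
+assuming a bare-metal M-mode test and a GNU toolchain; the helper names, the
+config-table fields and the pass/fail handling are illustrative and are not
+taken from the generated tests.
+
+```c
+#include <stdint.h>
+
+/* One entry per generated configuration (inputs for step 1). */
+struct pmp_test_cfg {
+    uint64_t pmpaddr0;   /* explicit CSR value                  */
+    uint8_t  pmpcfg0;    /* explicit CSR value                  */
+    uint8_t  mseccfg;    /* rlb/mmwp/mml bits                   */
+    int      lock_first; /* implicit state: lock set previously */
+};
+
+static volatile int errors;                 /* recorded during step 2 */
+
+static void setup_config(const struct pmp_test_cfg *c) { (void)c; /* step 1 */ }
+static void try_access(const struct pmp_test_cfg *c)   { (void)c; /* step 2:
+                        would access CSRs/memory; traps bump `errors` */ }
+static int  verify(const struct pmp_test_cfg *c)       { (void)c; /* step 3 */
+                        return errors == 0; }
+
+int main(void) {
+    static const struct pmp_test_cfg cfgs[] = { /* generated combinations */ { 0 } };
+    for (unsigned i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++) {
+        errors = 0;
+        setup_config(&cfgs[i]);
+        try_access(&cfgs[i]);
+        if (!verify(&cfgs[i]))
+            return 1;                       /* report failure immediately */
+    }
+    return 0;                               /* all combinations passed */
+}
+```
+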
+#### 3.1.1. CSR access
+
+For CSR access, only M mode is tested.
+
+Test configurations can be combinations of the following:
+
+- PMP locked or not
+- If PMP is not locked, whether it was locked previously
+- mseccfg bits, including rlb, mmwp and mml
+
+Action types can be combinations of the following:
+
+- Target CSR(s): pmpcfg/pmpaddr or mseccfg
+- Target values
+  - For mseccfg, its rlb, mmwp and mml bits
+  - For pmpaddr, selected valid values
+  - For pmpcfg, R/W/X values and the sub-index.
+
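+The sketch below illustrates one such CSR-access case from M mode, assuming
+RV64 and a GNU toolchain; mseccfg is addressed numerically as CSR 0x747 (per
+the Smepmp proposal) in case the assembler does not know it by name, and the
+written values are arbitrary examples rather than the generated test vectors.
+
+```c
+#include <stdint.h>
+
+/* Write a value to each target CSR and return the previous value. */
+static inline uint64_t rw_pmpcfg0(uint64_t v) {
+    uint64_t old;
+    asm volatile("csrrw %0, pmpcfg0, %1" : "=r"(old) : "r"(v));
+    return old;
+}
+
+static inline uint64_t rw_pmpaddr0(uint64_t v) {
+    uint64_t old;
+    asm volatile("csrrw %0, pmpaddr0, %1" : "=r"(old) : "r"(v));
+    return old;
+}
+
+static inline uint64_t rw_mseccfg(uint64_t v) {
+    uint64_t old;
+    /* mseccfg is CSR 0x747 in the Smepmp proposal */
+    asm volatile("csrrw %0, 0x747, %1" : "=r"(old) : "r"(v));
+    return old;
+}
+
+int csr_access_case(void) {
+    rw_pmpaddr0(0x2000 >> 2);   /* example pmpaddr value (8-byte NAPOT base) */
+    rw_pmpcfg0(0x1f);           /* example config: NAPOT + R/W/X             */
+    rw_mseccfg(0x3);            /* example: set mml and mmwp                 */
+    /* Read back and compare: locked or WARL-restricted fields may read back
+     * differently from what was written, which is what the tests check.     */
+    return rw_pmpcfg0(0x1f) == 0x1f;
+}
+```
+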
+#### 3.1.2. Memory access
+
+Tests for memory access can be divided into 2 parts:
+
+- non-share mode (RW != 01)
+- share mode access between U mode and M mode.
+
+In both cases, when there is a pmp exception from a load/store, the exception
+handler redirects the PC so that execution resumes at the next instruction. For
+a fetch exception, the test breaks and reports its result immediately.
+
+A special case is the dependency between the L bit and the MML bit: the L bit
+is set in advance, before setting the MML bit; otherwise, after the MML bit is
+set, M mode is not allowed to access even the interrupt handler.
+
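+A sketch of this resume-at-next-instruction handling is shown below; it
+assumes an RV64 M-mode handler called from an assembly trap stub that saves
+and restores registers and ends in mret, and that the faulting loads/stores
+are uncompressed 4-byte instructions. The counter and reporting helper are
+illustrative.
+
+```c
+#include <stdint.h>
+
+#define CAUSE_FETCH_ACCESS 1u   /* instruction access fault */
+#define CAUSE_LOAD_ACCESS  5u   /* load access fault        */
+#define CAUSE_STORE_ACCESS 7u   /* store/AMO access fault   */
+
+volatile unsigned pmp_trap_count;        /* checked later in step 3 */
+
+static void report_and_stop(unsigned cause) {
+    (void)cause;
+    for (;;)                             /* fetch faults end the test here */
+        ;
+}
+
+/* Called from the assembly trap stub (which ends in mret). */
+void handle_trap(void) {
+    uint64_t cause, epc;
+    asm volatile("csrr %0, mcause" : "=r"(cause));
+    asm volatile("csrr %0, mepc"   : "=r"(epc));
+
+    if (cause == CAUSE_LOAD_ACCESS || cause == CAUSE_STORE_ACCESS) {
+        pmp_trap_count++;                /* record the error ...             */
+        asm volatile("csrw mepc, %0" :: "r"(epc + 4)); /* ... and resume at
+                                            the next (4-byte) instruction    */
+        return;
+    }
+    report_and_stop((unsigned)cause);    /* fetch fault: report immediately */
+}
+```
+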
+##### 3.1.2.1. Non-share mode
+
+For non-share mode, test configurations can be combinations of the following:
+
+- mseccfg bits, including mmwp and mml. Here the rlb bit can be optional
+- whether a matching pmp entry is available or not
+- when matched, the related R/W/X/L bits
+
+Related action types can be combinations of the following:
+
+- M mode or U mode
+
+Load/store and fetch are all tested in each test case.
+
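+As an illustration, one such non-share-mode configuration could be programmed
+as below: one NAPOT PMP entry plus the mseccfg mml/mmwp bits. Bit positions
+follow the RISC-V privileged spec and the Smepmp proposal; the helper names,
+the region base/size and the choice of entry 0 are assumptions of the sketch.
+
+```c
+#include <stdint.h>
+
+#define PMP_R        (1u << 0)
+#define PMP_W        (1u << 1)
+#define PMP_X        (1u << 2)
+#define PMP_A_NAPOT  (3u << 3)
+#define PMP_L        (1u << 7)
+
+#define MSECCFG_MML  (1u << 0)
+#define MSECCFG_MMWP (1u << 1)
+#define MSECCFG_RLB  (1u << 2)
+
+/* NAPOT encoding of a naturally aligned power-of-two region (size >= 8). */
+static inline uint64_t napot(uint64_t base, uint64_t size) {
+    return (base >> 2) | ((size >> 3) - 1);
+}
+
+/* Program PMP entry 0, then turn on mml/mmwp for the test case. */
+static void setup_non_share_case(uint64_t base, uint64_t size, uint8_t rwxl) {
+    asm volatile("csrw pmpaddr0, %0" :: "r"(napot(base, size)));
+    asm volatile("csrw pmpcfg0, %0"  :: "r"((uint64_t)(PMP_A_NAPOT | rwxl)));
+    /* mseccfg is CSR 0x747 in the Smepmp proposal. */
+    asm volatile("csrs 0x747, %0"    :: "r"((uint64_t)(MSECCFG_MML | MSECCFG_MMWP)));
+}
+```
+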
+AGUAbABsAEUAPgBuAGEAZwBhAHQAaQB2AGUAPAAvAHMAcABhAG4APgAgAHQAZQBzAHQAcwAgAGYA +bwByACAAbgBvAG4ALQBzAGgAYQByAGUAIABtAG8AZABlAC4APABvADoAcAA+ADwALwBvADoAcAA+ +ADwALwBzAHAAYQBuAD4APAAvAGwAaQA+AA0ACgA8AC8AdQBsAD4ADQAKAA0ACgA8AHAAPgBSAGUA +bABhAHQAZQBkACAAYQBjAHQAaQBvAG4AIAB0AHkAcABlAHMAIABiAGEAbgAgAGIAZQAgAGMAbwBt +AGIAaQBuAGEAdABpAG8AbgBzACAAbwBmACAAZgBvAGwAbABvAHcAaQBuAGcAOgA8AC8AcAA+AA0A +CgANAAoAPAB1AGwAIAB0AHkAcABlAD0AZABpAHMAYwA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBz +AD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQAeQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4A +LQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0 +AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0ACgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoA +bAAwACAAbABlAHYAZQBsADEAIABsAGYAbwAxADAAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBz +AHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYA +YQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABp +AG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4ATQAgAG0AbwBkAGUAIABvAHIAIABVACAA +bQBvAGQAZQA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4APgA8AC8AbABpAD4ADQAK +ACAAPABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMAdAB5AGwAZQA9ACcA +bQBzAG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUAdABvADsAbQBzAG8ALQBt +AGEAcgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABvADsADQAKACAAIAAgACAA +IABtAHMAbwAtAGwAaQBzAHQAOgBsADAAIABsAGUAdgBlAGwAMQAgAGwAZgBvADEAMAA7AHQAYQBi +AC0AcwB0AG8AcABzADoAbABpAHMAdAAgADMANgAuADAAcAB0ACcAPgA8AHMAcABhAG4AIABzAHQA +eQBsAGUAPQAnAG0AcwBvAC0AZgBhAHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6 +AA0ACgAgACAAIAAgACAAIgBUAGkAbQBlAHMAIABOAGUAdwAgAFIAbwBtAGEAbgAiACcAPgBMAG8A +YQBkAC8AUwB0AG8AcgBlACAAbwByACAAZQB4AGUAYwB1AHQAZQAuADwAbwA6AHAAPgA8AC8AbwA6 +AHAAPgA8AC8AcwBwAGEAbgA+ADwALwBsAGkAPgANAAoAPAAvAHUAbAA+AA0ACgANAAoAPABoADMA +IABpAGQAPQAiAHAAbQBwAGUAbgBoAGEAbgBjAGUAbQBlAG4AdAAtAHMAcABpAGsAZQAsAHMAYQBp +AGwAYQBuAGQAdQBuAGkAdAB0AGUAcwB0AC0AVABlAHMAdAByAGEAbgBnAGUAIgA+ADwAcwBwAGEA +bgAgAGMAbABhAHMAcwA9AG4AaAAtAG4AdQBtAGIAZQByAD4APABzAHAAYQBuAA0ACgBzAHQAeQBs +AGUAPQAnAG0AcwBvAC0AZgBhAHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6ACIA +VABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4AMwAuADEALgAzAC4AIAA8AC8AcwBw +AGEAbgA+ADwALwBzAHAAYQBuAD4APABzAHAAYQBuAA0ACgBzAHQAeQBsAGUAPQAnAG0AcwBvAC0A +ZgBhAHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6ACIAVABpAG0AZQBzACAATgBl +AHcAIABSAG8AbQBhAG4AIgAnAD4AVABlAHMAdAAgAHIAYQBuAGcAZQA8AG8AOgBwAD4APAAvAG8A +OgBwAD4APAAvAHMAcABhAG4APgA8AC8AaAAzAD4ADQAKAA0ACgA8AHAAPgBFAHgAaQBzAHQAZQBk +ACAAZgBlAGEAdAB1AHIAZQBzACAAYQByAGUAIABuAG8AdAAgAHQAZQBzAHQAIAB0AGEAcgBnAGUA +dABzACwAIABpAGYAIAB0AGgAZQB5ACAAYQByAGUAIABpAG4AZABlAHAAZQBuAGQAZQBuAHQAIAB3 +AGkAdABoACYAbgBiAHMAcAA7ADwAcwBwAGEAbgANAAoAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUA +PgBwAG0AcAA8AC8AcwBwAGEAbgA+ACAAZQBuAGgAYQBuAGMAZQBtAGUAbgB0AC4AJgBuAGIAcwBw +ADsAPAAvAHAAPgANAAoADQAKADwAdQBsACAAdAB5AHAAZQA9AHMAcQB1AGEAcgBlAD4ADQAKACAA +PABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMAdAB5AGwAZQA9ACcAbQBz +AG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUAdABvADsAbQBzAG8ALQBtAGEA +cgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABvADsADQAKACAAIAAgACAAIABt +AHMAbwAtAGwAaQBzAHQAOgBsADEAMgAgAGwAZQB2AGUAbAAxACAAbABmAG8AMQAxADsAdABhAGIA +LQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4AMABwAHQAJwA+ADwAcwBwAGEAbgAgAHMAdAB5 +AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0AC0AZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoA +DQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4AZQB3ACAAUgBvAG0AYQBuACIAJwA+AFQAaABl +ACAAZQB4AGkAcwB0AGUAZAAgADwAcwBwAGEAbgAgAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4A 
+cABtAHAAPAAvAHMAcABhAG4APgAgAHAAcgBvAHQAZQBjAHQAaQBvAG4ADQAKACAAIAAgACAAIAB3 +AGkAdABoACAAPABzAHAAYQBuACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUAPgBwAG0AcABjAGYA +ZwA8AC8AcwBwAGEAbgA+ACAAYQBuAGQAIAA8AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBs +AGwARQA+AHAAbQBwAGEAZABkAHIAPAAvAHMAcABhAG4APgANAAoAIAAgACAAIAAgAGkAcwAgAG4A +bwB0ACAAdABlAHMAdABlAGQALgA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4APgA8 +AC8AbABpAD4ADQAKACAAPABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMA +dAB5AGwAZQA9ACcAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUAdABv +ADsAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABvADsA +DQAKACAAIAAgACAAIABtAHMAbwAtAGwAaQBzAHQAOgBsADEAMgAgAGwAZQB2AGUAbAAxACAAbABm +AG8AMQAxADsAdABhAGIALQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4AMABwAHQAJwA+ADwA +cwBwAGEAbgAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0AC0AZgBvAG4AdAAt +AGYAYQBtAGkAbAB5ADoADQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4AZQB3ACAAUgBvAG0A +YQBuACIAJwA+AFQAaABlACAAZQBuAHQAcgBhAG4AYwBlACAAcAByAGkAbwByAGkAdAB5ACAAYwBo +AGUAYwBrACAAaQBzACAAbgBvAHQAIAB0AGUAcwB0AGUAZAAuACAASQBuAHMAdABlAGEAZAAsACAA +YQBzAA0ACgAgACAAIAAgACAAcgBlAGwAYQB0AGUAZAAgAHQAbwAgADwAcwBwAGEAbgAgAGMAbABh +AHMAcwA9AFMAcABlAGwAbABFAD4AbQBzAGUAYwBjAGYAZwA8AC8AcwBwAGEAbgA+ACwAIABiAG8A +dABoACAAbQBhAHQAYwBoAGUAZAAgAGEAbgBkACAAbgBvAG4ALQBtAGEAdABjAGgAZQBkAA0ACgAg +ACAAIAAgACAAYQByAGUAIAB0AGUAcwB0AGUAZAAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8AC8A +cwBwAGEAbgA+ADwALwBsAGkAPgANAAoAIAA8AGwAaQAgAGMAbABhAHMAcwA9AE0AcwBvAE4AbwBy +AG0AYQBsACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AdABvAHAALQBhAGwA +dAA6AGEAdQB0AG8AOwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AYgBvAHQAdABvAG0ALQBhAGwAdAA6 +AGEAdQB0AG8AOwANAAoAIAAgACAAIAAgAG0AcwBvAC0AbABpAHMAdAA6AGwAMQAyACAAbABlAHYA +ZQBsADEAIABsAGYAbwAxADEAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAw +AHAAdAAnAD4APABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQA +LQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBl +AHcAIABSAG8AbQBhAG4AIgAnAD4ATwBuAGwAeQAgAE0AIABtAG8AZABlACAAYQBuAGQAIABVACAA +bQBvAGQAZQAgAGEAcgBlACAAdABlAHMAdABlAGQALgAgAEYAbwByACAAPABzAHAAYQBuAA0ACgAg +ACAAIAAgACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUAPgBwAG0AcAA8AC8AcwBwAGEAbgA+ACwA +IABpAHQAIABjAGEAcgBlAHMAIABtAG8AcgBlACAAbwBuACAAdwBoAGUAdABoAGUAcgAgAE0AIABt +AG8AZABlACAAbwByACAAbgBvAHQALgA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4A +PgA8AC8AbABpAD4ADQAKACAAPABsAGkAIABjAGwAYQBzAHMAPQBNAHMAbwBOAG8AcgBtAGEAbAAg +AHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAHQAbwBwAC0AYQBsAHQAOgBhAHUA +dABvADsAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAGIAbwB0AHQAbwBtAC0AYQBsAHQAOgBhAHUAdABv +ADsADQAKACAAIAAgACAAIABtAHMAbwAtAGwAaQBzAHQAOgBsADEAMgAgAGwAZQB2AGUAbAAxACAA +bABmAG8AMQAxADsAdABhAGIALQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4AMABwAHQAJwA+ +ADwAcwBwAGEAbgAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0AC0AZgBvAG4A +dAAtAGYAYQBtAGkAbAB5ADoADQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4AZQB3ACAAUgBv +AG0AYQBuACIAJwA+AEYAbwByACAAQwBTAFIAIABhAGMAYwBlAHMAcwAgAG8AbgAgADwAcwBwAGEA +bgAgAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4AcABtAHAAYQBkAGQAcgA8AC8AcwBwAGEAbgA+ +ACAAYQBuAGQADQAKACAAIAAgACAAIAA8AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwA +RQA+AHAAbQBwAGMAZgBnADwALwBzAHAAYQBuAD4ALAAgAHQAZQBzAHQAIABmAG8AYwB1AHMAIABv +AG4AIABSAC8AVwAvAFgAIABiAGkAdABzACAAcwBpAG4AYwBlACAAdABoAGUAcgBlACAAaQBzAA0A +CgAgACAAIAAgACAAYQAgAG4AZQB3ACAAbQBvAGQAZQAgAFIAVwA9ADAAMQAuACAAUAByAG8AdABl +AGMAdABpAG8AbgAgAHcAaQB0AGgAIABMACAAYgBpAHQAIABpAHMAIABuAG8AdAAgADwAcwBwAGEA +bgAgAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4AYwBvAHYAZQByAHIAZQBkADwALwBzAHAAYQBu 
+AD4ALgA8AG8AOgBwAD4APAAvAG8AOgBwAD4APAAvAHMAcABhAG4APgA8AC8AbABpAD4ADQAKADwA +LwB1AGwAPgANAAoADQAKADwAaAAyACAAaQBkAD0AIgBwAG0AcABlAG4AaABhAG4AYwBlAG0AZQBu +AHQALQBzAHAAaQBrAGUALABzAGEAaQBsAGEAbgBkAHUAbgBpAHQAdABlAHMAdAAtAFQAZQBzAHQA +aQBtAHAAbABlAG0AZQBuAHQAYQB0AGkAbwBuACIAPgA8AHMAcABhAG4ADQAKAGMAbABhAHMAcwA9 +AG4AaAAtAG4AdQBtAGIAZQByAD4APABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYA +YQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgAiAFQAaQBtAGUAcwAgAE4AZQB3 +ACAAUgBvAG0AYQBuACIAJwA+ADMALgAyAC4AIAA8AC8AcwBwAGEAbgA+ADwALwBzAHAAYQBuAD4A +PABzAHAAYQBuAA0ACgBzAHQAeQBsAGUAPQAnAG0AcwBvAC0AZgBhAHIAZQBhAHMAdAAtAGYAbwBu +AHQALQBmAGEAbQBpAGwAeQA6ACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4A +VABlAHMAdAAgAGkAbQBwAGwAZQBtAGUAbgB0AGEAdABpAG8AbgA8AG8AOgBwAD4APAAvAG8AOgBw +AD4APAAvAHMAcABhAG4APgA8AC8AaAAyAD4ADQAKAA0ACgA8AHAAPgBUAG8AIABnAGUAdAAgAGUA +eABwAGUAYwB0AGUAZAAgAGMAbwBtAGIAaQBuAGEAdABpAG8AbgBzACAAZgBvAHIAIAB0AGUAcwB0 +ACAAYwBvAG4AZgBpAGcAdQByAGEAdABpAG8AbgBzACAAYQBuAGQAIABhAGMAdABpAG8AbgAgAHQA +eQBwAGUAcwAsACAARwBOAFUAIAA8AHMAcABhAG4ADQAKAGMAbABhAHMAcwA9AFMAcABlAGwAbABF +AD4AZwBlAG4AZwBlAG4APAAvAHMAcABhAG4APgAgADEALgA0AC4AMgAgAGkAcwAgAHUAcwBlAGQA +IAB0AG8AIABnAGUAbgBlAHIAYQB0AGUAIAB0AGUAcwB0ACAAYwBhAHMAZQBzAC4APAAvAHAAPgAN +AAoADQAKADwAcAA+AFMAZQBlACYAbgBiAHMAcAA7ADwAYQAgAGgAcgBlAGYAPQAiAGgAdAB0AHAA +cwA6AC8ALwB3AHcAdwAuAGcAbgB1AC4AbwByAGcALwBzAG8AZgB0AHcAYQByAGUALwBnAGUAbgBn +AGUAbgAvACIAPgBoAHQAdABwAHMAOgAvAC8AdwB3AHcALgBnAG4AdQAuAG8AcgBnAC8AcwBvAGYA +dAB3AGEAcgBlAC8AZwBlAG4AZwBlAG4ALwA8AC8AYQA+ACYAbgBiAHMAcAA7AGYAbwByAA0ACgBp +AG4AdAByAG8AZAB1AGMAZQAgAGEAbgBkACAAZQB4AGEAbQBwAGwAZQBzAC4AIABCAGEAcwBpAGMA +YQBsAGwAeQAgAGYAbwByACAAYQBuACAAaQBuAHAAdQB0ACAAdABlAG0AcABsAGEAdABlACAAdwBp +AHQAaAAgAG0AYQBuAHkAIABwAGwAYQBjAGUALQA8AHMAcABhAG4ADQAKAGMAbABhAHMAcwA9AFMA +cABlAGwAbABFAD4AdABhAGsAZQBuAHMAPAAvAHMAcABhAG4APgAgAGwAaQBrAGUAJgBuAGIAcwBw +ADsAQAB4AHgAeABAACAAYQBuAGQAIABAADwAcwBwAGEAbgAgAGMAbABhAHMAcwA9AFMAcABlAGwA +bABFAD4AeQB5AHkAPAAvAHMAcABhAG4APgBAACwADQAKAEcATgBVACAAPABzAHAAYQBuACAAYwBs +AGEAcwBzAD0AUwBwAGUAbABsAEUAPgBnAGUAbgBnAGUAbgA8AC8AcwBwAGEAbgA+ACAAZwBlAG4A +ZQByAGEAdABlAHMAIABhACAAQwArACsAIABjAGwAYQBzAHMAIAAoAGkAbgAgAGEAbgAgAG8AdQB0 +AHAAdQB0ACAALgBoAA0ACgBmAGkAbABlACkAIAB3AGkAdABoACAAcAB1AGIAbABpAGMAIABtAGUA +dABoAG8AZABzACAAdABvACAAYQBzAHMAaQBnAG4AIABhAG4AZAAgAHIAZQBwAGwAYQBjAGUAIAB0 +AGgAZQAgAHYAYQBsAHUAZQAgAG8AZgAgAHAAbABhAGMAZQAtADwAcwBwAGEAbgANAAoAYwBsAGEA +cwBzAD0AUwBwAGUAbABsAEUAPgB0AGEAawBlAG4AcwA8AC8AcwBwAGEAbgA+AC4APAAvAHAAPgAN +AAoADQAKADwAcAA+AFQAaABlACAAdABlAHMAdAAgAGcAZQBuAGUAcgBhAHQAbwByACAAZgBvAHIA +IAA8AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AHAAbQBwADwALwBzAHAAYQBu +AD4AIABlAG4AaABhAG4AYwBlAG0AZQBuAHQAIABpAHMADQAKAGMAbwBuAHMAdAByAHUAYwB0AGUA +ZAAgAGIAeQAgAGYAbwBsAGwAbwB3AGkAbgBnACAAcABhAHIAdABzADoAPAAvAHAAPgANAAoADQAK +ADwAdQBsACAAdAB5AHAAZQA9AHMAcQB1AGEAcgBlAD4ADQAKACAAPABsAGkAIABjAGwAYQBzAHMA +PQBNAHMAbwBOAG8AcgBtAGEAbAAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBtAGEAcgBnAGkAbgAt +AHQAbwBwAC0AYQBsAHQAOgBhAHUAdABvADsAbQBzAG8ALQBtAGEAcgBnAGkAbgAtAGIAbwB0AHQA +bwBtAC0AYQBsAHQAOgBhAHUAdABvADsADQAKACAAIAAgACAAIABtAHMAbwAtAGwAaQBzAHQAOgBs +ADIAIABsAGUAdgBlAGwAMQAgAGwAZgBvADEAMgA7AHQAYQBiAC0AcwB0AG8AcABzADoAbABpAHMA +dAAgADMANgAuADAAcAB0ACcAPgA8AHMAcABhAG4AIABzAHQAeQBsAGUAPQAnAG0AcwBvAC0AZgBh +AHIAZQBhAHMAdAAtAGYAbwBuAHQALQBmAGEAbQBpAGwAeQA6AA0ACgAgACAAIAAgACAAIgBUAGkA +bQBlAHMAIABOAGUAdwAgAFIAbwBtAGEAbgAiACcAPgBJAG4AZgByAGEAIABmAGkAbABlAHMALAAg +AGkAbgBjAGwAdQBkAGkAbgBnACAAcwBpAG0AcABsAGkAZgBpAGUAZAAgAEMAUgBUACwAIAA8AHMA 
+cABhAG4ADQAKACAAIAAgACAAIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AHMAeQBzAGMAYQBs +AGwAPAAvAHMAcABhAG4APgAsACAAPABzAHAAYQBuACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUA +PgBsAGkAbgBrAHMAYwByAGkAcAB0AHMAPAAvAHMAcABhAG4APgAsACAAYQBuAGQAIAA8AHMAcABh +AG4ADQAKACAAIAAgACAAIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AE0AYQBrAGUAZgBpAGwA +ZQA8AC8AcwBwAGEAbgA+AC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4APAAv +AGwAaQA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQA +eQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7 +AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0A +CgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAyACAAbABlAHYAZQBsADEAIABsAGYAbwAx +ADIAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAA +YQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBh +AG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4A +IgAnAD4AMwAgAGkAbgBwAHUAdAAgAHQAZQBtAHAAbABhAHQAZQAgAGYAaQBsAGUAcwAgACgALgA8 +AHMAcABhAG4AIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AGMAYwBfAHMAawBlAGwAPAAvAHMA +cABhAG4APgApAA0ACgAgACAAIAAgACAAZgBvAHIAIABDAFMAUgAsACAAbgBvAG4ALQBzAGgAYQBy +AGUAIABtAG8AZABlACAAbQBlAG0AbwByAHkAIABhAGMAYwBlAHMAcwAgAGEAbgBkACAAcwBoAGEA +cgBlACAAbQBvAGQAZQAgAG0AZQBtAG8AcgB5ACAAYQBjAGMAZQBzAHMADQAKACAAIAAgACAAIABz +AGUAcABhAHIAYQB0AGUAbAB5AC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4A +PAAvAGwAaQA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABz +AHQAeQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQA +bwA7AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7 +AA0ACgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAyACAAbABlAHYAZQBsADEAIABsAGYA +bwAxADIAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABz +AHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0A +ZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBh +AG4AIgAnAD4AMQAgAGQAcgBpAHYAZQByACAAZgBpAGwAZQBzACAAdABvACAAdABhAGsAZQAgAHUA +cwBlACAAbwBmACAAZwBlAG4AZQByAGEAdABlAGQAIABDACsAKwAgAGMAbABhAHMAcwANAAoAIAAg +ACAAIAAgAGYAaQBsAGUAcwAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8AC8AcwBwAGEAbgA+ADwA +LwBsAGkAPgANAAoAPAAvAHUAbAA+AA0ACgANAAoAPABwAD4AVwBpAHQAaAAgAHQAaABlAHMAZQAg +AGYAaQBsAGUAcwAsACAAdABoAGUAIABzAHQAZQBwAHMAIAB0AG8AIAByAHUAbgAgAHQAZQBzAHQA +IABjAGEAcwBlAHMAIABhAHIAZQAgAGEAcwAgAGYAbwBsAGwAbwB3AGkAbgBnACAAKAA8AHMAcABh +AG4ADQAKAGMAbABhAHMAcwA9AFMAcABlAGwAbABFAD4ATQBhAGsAZQBmAGkAbABlADwALwBzAHAA +YQBuAD4AIAB0AGEAcwBrAHMAKQA6ADwALwBwAD4ADQAKAA0ACgA8AHUAbAAgAHQAeQBwAGUAPQBz +AHEAdQBhAHIAZQA+AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwA +IABzAHQAeQBsAGUAPQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1 +AHQAbwA7AG0AcwBvAC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQA +bwA7AA0ACgAgACAAIAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAzACAAbABlAHYAZQBsADEAIABs +AGYAbwAxADMAOwB0AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4A +PABzAHAAYQBuACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0 +AC0AZgBhAG0AaQBsAHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8A +bQBhAG4AIgAnAD4AVABvACAAaQBuAHYAbwBrAGUAIABHAE4AVQAgADwAcwBwAGEAbgAgAGMAbABh +AHMAcwA9AFMAcABlAGwAbABFAD4AZwBlAG4AZwBlAG4APAAvAHMAcABhAG4APgAgAHQAbwANAAoA +IAAgACAAIAAgAGcAZQBuAGUAcgBhAHQAZQAgAEMAKwArACAAYwBsAGEAcwBzACAAZgBpAGwAZQBz +AC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4APAAvAGwAaQA+AA0ACgAgADwA +bABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQAeQBsAGUAPQAnAG0AcwBv 
+AC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7AG0AcwBvAC0AbQBhAHIA +ZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0ACgAgACAAIAAgACAAbQBz +AG8ALQBsAGkAcwB0ADoAbAAzACAAbABlAHYAZQBsADEAIABsAGYAbwAxADMAOwB0AGEAYgAtAHMA +dABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAAYQBuACAAcwB0AHkAbABl +AD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBsAHkAOgANAAoA +IAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4AVABvACAAYwBv +AG0AcABpAGwAZQAgAGQAcgBpAHYAZQByACAAZgBpAGwAZQAgAHQAbwBnAGUAdABoAGUAcgAgAHcA +aQB0AGgAIABnAGUAbgBlAHIAYQB0AGUAZAAgAEMAKwArAA0ACgAgACAAIAAgACAAYwBsAGEAcwBz +ACAAZgBpAGwAZQBzACwAIAB0AG8AIABnAGUAdAAgAHQAaABlACAAdABlAHMAdAAgAGcAZQBuAGUA +cgBhAHQAbwByAC4APABvADoAcAA+ADwALwBvADoAcAA+ADwALwBzAHAAYQBuAD4APAAvAGwAaQA+ +AA0ACgAgADwAbABpACAAYwBsAGEAcwBzAD0ATQBzAG8ATgBvAHIAbQBhAGwAIABzAHQAeQBsAGUA +PQAnAG0AcwBvAC0AbQBhAHIAZwBpAG4ALQB0AG8AcAAtAGEAbAB0ADoAYQB1AHQAbwA7AG0AcwBv +AC0AbQBhAHIAZwBpAG4ALQBiAG8AdAB0AG8AbQAtAGEAbAB0ADoAYQB1AHQAbwA7AA0ACgAgACAA +IAAgACAAbQBzAG8ALQBsAGkAcwB0ADoAbAAzACAAbABlAHYAZQBsADEAIABsAGYAbwAxADMAOwB0 +AGEAYgAtAHMAdABvAHAAcwA6AGwAaQBzAHQAIAAzADYALgAwAHAAdAAnAD4APABzAHAAYQBuACAA +cwB0AHkAbABlAD0AJwBtAHMAbwAtAGYAYQByAGUAYQBzAHQALQBmAG8AbgB0AC0AZgBhAG0AaQBs +AHkAOgANAAoAIAAgACAAIAAgACIAVABpAG0AZQBzACAATgBlAHcAIABSAG8AbQBhAG4AIgAnAD4A +VABvACAAcgB1AG4AIAB0AGgAZQAgAHQAZQBzAHQAIABnAGUAbgBlAHIAYQB0AG8AcgAuACAASAB1 +AG4AZAByAGUAZABzACAAbwBmACAAdABlAHMAdAAgAGMAYQBzAGUAcwAgAHcAaQBsAGwADQAKACAA +IAAgACAAIABiAGUAIABnAGUAbgBlAHIAYQB0AGUAZAAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8 +AC8AcwBwAGEAbgA+ADwALwBsAGkAPgANAAoAIAA8AGwAaQAgAGMAbABhAHMAcwA9AE0AcwBvAE4A +bwByAG0AYQBsACAAcwB0AHkAbABlAD0AJwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AdABvAHAALQBh +AGwAdAA6AGEAdQB0AG8AOwBtAHMAbwAtAG0AYQByAGcAaQBuAC0AYgBvAHQAdABvAG0ALQBhAGwA +dAA6AGEAdQB0AG8AOwANAAoAIAAgACAAIAAgAG0AcwBvAC0AbABpAHMAdAA6AGwAMwAgAGwAZQB2 +AGUAbAAxACAAbABmAG8AMQAzADsAdABhAGIALQBzAHQAbwBwAHMAOgBsAGkAcwB0ACAAMwA2AC4A +MABwAHQAJwA+ADwAcwBwAGEAbgAgAHMAdAB5AGwAZQA9ACcAbQBzAG8ALQBmAGEAcgBlAGEAcwB0 +AC0AZgBvAG4AdAAtAGYAYQBtAGkAbAB5ADoADQAKACAAIAAgACAAIAAiAFQAaQBtAGUAcwAgAE4A +ZQB3ACAAUgBvAG0AYQBuACIAJwA+AFQAbwAgAGMAbwBtAHAAaQBsAGUAIABhAG4AZAAgAHIAdQBu +ACAAdABlAHMAdAAgAGMAYQBzAGUAcwAgACgAdwBpAHQAaAAgAEMAUgBUACwAIAA8AHMAcABhAG4A +DQAKACAAIAAgACAAIABjAGwAYQBzAHMAPQBTAHAAZQBsAGwARQA+AHMAeQBzAGMAYQBsAGwAPAAv +AHMAcABhAG4APgAsACAAPABzAHAAYQBuACAAYwBsAGEAcwBzAD0AUwBwAGUAbABsAEUAPgBsAGkA +bgBrAHMAYwBpAHAAdABzADwALwBzAHAAYQBuAD4AKQAgAG8AbgBlACAAYgB5AA0ACgAgACAAIAAg +ACAAbwBuAGUAIABvAG4AIABiAG8AdABoACAAcwBwAGkAawBlACAAYQBuAGQAIABzAGEAaQBsACAA +QwAtAGUAbQB1AGwAYQB0AG8AcgAuADwAbwA6AHAAPgA8AC8AbwA6AHAAPgA8AC8AcwBwAGEAbgA+ +ADwALwBsAGkAPgANAAoAPAAvAHUAbAA+AA0ACgANAAoAPABwAD4AQwB1AHIAcgBlAG4AdABsAHkA +IAB0AGgAZQByAGUAIABhAHIAZQAgADEAMgA4ACAAdABlAHMAdABzACAAZgBvAHIAIABDAFMAUgAg +AGEAYwBjAGUAcwBzACwAIAA1ADIAOAAgAHQAZQBzAHQAcwAgAGYAbwByACAAbgBvAG4ALQBzAGgA +YQByAGUAIABtAG8AZABlAA0ACgBhAGMAYwBlAHMAcwAgAGEAbgBkACAAMgA0ACAAdABlAHMAdABz +ACAAZgBvAHIAIABzAGgAYQByAGUAIABtAG8AZABlACAAYQBjAGMAZQBzAHMALgA8AC8AcAA+AA0A +CgANAAoAPABwAD4APABvADoAcAA+ACYAbgBiAHMAcAA7ADwALwBvADoAcAA+ADwALwBwAD4ADQAK +AA0ACgA8AC8AZABpAHYAPgANAAoADQAKADwALwBiAG8AZAB5AD4ADQAKAA0ACgA8AC8AaAB0AG0A +bAA+AA0ACgA= + +------=_NextPart_01D7437B.526C0BD0 +Content-Location: file:///C:/2AEBA2D4/pmp+enhancement+-+spike,+sail+and+unit+test_files/themedata.thmx +Content-Transfer-Encoding: base64 +Content-Type: application/vnd.ms-officetheme + +UEsDBBQABgAIAAAAIQDp3g+//wAAABwCAAATAAAAW0NvbnRlbnRfVHlwZXNdLnhtbKyRy07DMBBF 
+90j8g+UtSpyyQAgl6YLHjseifMDImSQWydiyp1X790zSVEKoIBZsLNkz954743K9Hwe1w5icp0qv +8kIrJOsbR12l3zdP2a1WiYEaGDxhpQ+Y9Lq+vCg3h4BJiZpSpXvmcGdMsj2OkHIfkKTS+jgCyzV2 +JoD9gA7NdVHcGOuJkTjjyUPX5QO2sB1YPe7l+Zgk4pC0uj82TqxKQwiDs8CS1Oyo+UbJFkIuyrkn +9S6kK4mhzVnCVPkZsOheZTXRNajeIPILjBLDsAyJX89nIBkt5r87nons29ZZbLzdjrKOfDZezE7B +/xRg9T/oE9PMf1t/AgAA//8DAFBLAwQUAAYACAAAACEApdan58AAAAA2AQAACwAAAF9yZWxzLy5y +ZWxzhI/PasMwDIfvhb2D0X1R0sMYJXYvpZBDL6N9AOEof2giG9sb69tPxwYKuwiEpO/3qT3+rov5 +4ZTnIBaaqgbD4kM/y2jhdj2/f4LJhaSnJQhbeHCGo3vbtV+8UNGjPM0xG6VItjCVEg+I2U+8Uq5C +ZNHJENJKRds0YiR/p5FxX9cfmJ4Z4DZM0/UWUtc3YK6PqMn/s8MwzJ5PwX+vLOVFBG43lExp5GKh +qC/jU72QqGWq1B7Qtbj51v0BAAD//wMAUEsDBBQABgAIAAAAIQBreZYWgwAAAIoAAAAcAAAAdGhl +bWUvdGhlbWUvdGhlbWVNYW5hZ2VyLnhtbAzMTQrDIBBA4X2hd5DZN2O7KEVissuuu/YAQ5waQceg +0p/b1+XjgzfO3xTVm0sNWSycBw2KZc0uiLfwfCynG6jaSBzFLGzhxxXm6XgYybSNE99JyHNRfSPV +kIWttd0g1rUr1SHvLN1euSRqPYtHV+jT9yniResrJgoCOP0BAAD//wMAUEsDBBQABgAIAAAAIQC2 +9GeYkwcAAMkgAAAWAAAAdGhlbWUvdGhlbWUvdGhlbWUxLnhtbOxZzYsbyRW/B/I/NH2X9dWtj8Hy +ok/P2jO2sWSHPdZIpe7yVHeJqtKMxWII3lMugcAm5JCFve0hhCzswi655I8x2CSbPyKvqlvdVVLJ +nhkcMGFGMHSXfu/Vr9579d5T1d3PXibUu8BcEJb2/Pqdmu/hdM4WJI16/rPZpNLxPSFRukCUpbjn +b7DwP7v361/dRUcyxgn2QD4VR6jnx1KujqpVMYdhJO6wFU7huyXjCZLwyqPqgqNL0JvQaqNWa1UT +RFLfS1ECah8vl2SOvZlS6d/bKh9TeE2lUANzyqdKNbYkNHZxXlcIsRFDyr0LRHs+zLNglzP8Uvoe +RULCFz2/pv/86r27VXSUC1F5QNaQm+i/XC4XWJw39Jw8OismDYIwaPUL/RpA5T5u3B63xq1Cnwag ++RxWmnGxdbYbwyDHGqDs0aF71B416xbe0N/c49wP1cfCa1CmP9jDTyZDsKKF16AMH+7hw0F3MLL1 +a1CGb+3h27X+KGhb+jUopiQ930PXwlZzuF1tAVkyeuyEd8Ng0m7kyksUREMRXWqKJUvloVhL0AvG +JwBQQIokST25WeElmkMUDxElZ5x4JySKIfBWKGUChmuN2qTWhP/qE+gn7VF0hJEhrXgBE7E3pPh4 +Ys7JSvb8B6DVNyBvf/75zesf37z+6c1XX715/fd8bq3KkjtGaWTK/fLdH/7zzW+9f//w7S9f/zGb +ehcvTPy7v/3u3T/++T71sOLSFG//9P27H79/++ff/+uvXzu09zk6M+EzkmDhPcKX3lOWwAId/PEZ +v57ELEbElOinkUApUrM49I9lbKEfbRBFDtwA23Z8ziHVuID31y8swtOYryVxaHwYJxbwlDE6YNxp +hYdqLsPMs3UauSfnaxP3FKEL19xDlFpeHq9XkGOJS+UwxhbNJxSlEkU4xdJT37FzjB2r+4IQy66n +ZM6ZYEvpfUG8ASJOk8zImRVNpdAxScAvGxdB8Ldlm9Pn3oBR16pH+MJGwt5A1EF+hqllxvtoLVHi +UjlDCTUNfoJk7CI53fC5iRsLCZ6OMGXeeIGFcMk85rBew+kPIc243X5KN4mN5JKcu3SeIMZM5Iid +D2OUrFzYKUljE/u5OIcQRd4TJl3wU2bvEPUOfkDpQXc/J9hy94ezwTPIsCalMkDUN2vu8OV9zKz4 +nW7oEmFXqunzxEqxfU6c0TFYR1Zon2BM0SVaYOw9+9zBYMBWls1L0g9iyCrH2BVYD5Adq+o9xQJ6 +JdXc7OfJEyKskJ3iiB3gc7rZSTwblCaIH9L8CLxu2nwMpS5xBcBjOj83gY8I9IAQL06jPBagwwju +g1qfxMgqYOpduON1wy3/XWWPwb58YdG4wr4EGXxtGUjspsx7bTND1JqgDJgZgi7DlW5BxHJ/KaKK +qxZbO+WW9qYt3QDdkdX0JCT9YAe00/uE/7veBzqMt3/5xrEPPk6/41ZsJatrdjqHksnxTn9zCLfb +1QwZX5BPv6kZoXX6BEMd2c9Ytz3NbU/j/9/3NIf2820nc6jfuO1kfOgwbjuZ/HDl43QyZfMCfY06 +8MgOevSxT3Lw1GdJKJ3KDcUnQh/8CPg9s5jAoJLTJ564OAVcxfCoyhxMYOEijrSMx5n8DZHxNEYr +OB2q+0pJJHLVkfBWTMChkR526lZ4uk5O2SI77KzX1cFmVlkFkuV4LSzG4aBKZuhWuzzAK9RrtpE+ +aN0SULLXIWFMZpNoOki0t4PKSPpYF4zmIKFX9lFYdB0sOkr91lV7LIBa4RX4we3Bz/SeHwYgAkJw +HgfN+UL5KXP11rvamR/T04eMaUUANNjbCCg93VVcDy5PrS4LtSt42iJhhJtNQltGN3gihp/BeXSq +0avQuK6vu6VLLXrKFHo+CK2SRrvzPhY39TXI7eYGmpqZgqbeZc9vNUMImTla9fwlHBrDY7KC2BHq +NxeiEdy8zCXPNvxNMsuKCzlCIs4MrpNOlg0SIjH3KEl6vlp+4Qaa6hyiudUbkBA+WXJdSCufGjlw +uu1kvFziuTTdbowoS2evkOGzXOH8VovfHKwk2RrcPY0Xl94ZXfOnCEIsbNeVARdEwN1BPbPmgsBl +WJHIyvjbKUx52jVvo3QMZeOIrmKUVxQzmWdwncoLOvqtsIHxlq8ZDGqYJC+EZ5EqsKZRrWpaVI2M +w8Gq+2EhZTkjaZY108oqqmq6s5g1w7YM7NjyZkXeYLU1MeQ0s8JnqXs35Xa3uW6nTyiqBBi8sJ+j +6l6hIBjUysksaorxfhpWOTsftWvHdoEfoHaVImFk/dZW7Y7dihrhnA4Gb1T5QW43amFoue0rtaX1 +rbl5sc3OXkDyGEGXu6ZSaFfCyS5H0BBNdU+SpQ3YIi9lvjXgyVtz0vO/rIX9YNgIh5VaJxxXgmZQ +q3TCfrPSD8NmfRzWa6NB4xUUFhkn9TC7sZ/ABQbd5Pf2enzv7j7Z3tHcmbOkyvTdfFUT13f39cbh 
+u3uPQNL5stWYdJvdQavSbfYnlWA06FS6w9agMmoN26PJaBh2upNXvnehwUG/OQxa406lVR8OK0Gr +puh3upV20Gj0g3a/Mw76r/I2BlaepY/cFmBezevefwEAAP//AwBQSwMEFAAGAAgAAAAhAA3RkJ+2 +AAAAGwEAACcAAAB0aGVtZS90aGVtZS9fcmVscy90aGVtZU1hbmFnZXIueG1sLnJlbHOEj00KwjAU +hPeCdwhvb9O6EJEm3YjQrdQDhOQ1DTY/JFHs7Q2uLAguh2G+mWm7l53JE2My3jFoqhoIOumVcZrB +bbjsjkBSFk6J2TtksGCCjm837RVnkUsoTSYkUiguMZhyDidKk5zQilT5gK44o49W5CKjpkHIu9BI +93V9oPGbAXzFJL1iEHvVABmWUJr/s/04GolnLx8WXf5RQXPZhQUoosbM4CObqkwEylu6usTfAAAA +//8DAFBLAQItABQABgAIAAAAIQDp3g+//wAAABwCAAATAAAAAAAAAAAAAAAAAAAAAABbQ29udGVu +dF9UeXBlc10ueG1sUEsBAi0AFAAGAAgAAAAhAKXWp+fAAAAANgEAAAsAAAAAAAAAAAAAAAAAMAEA +AF9yZWxzLy5yZWxzUEsBAi0AFAAGAAgAAAAhAGt5lhaDAAAAigAAABwAAAAAAAAAAAAAAAAAGQIA +AHRoZW1lL3RoZW1lL3RoZW1lTWFuYWdlci54bWxQSwECLQAUAAYACAAAACEAtvRnmJMHAADJIAAA +FgAAAAAAAAAAAAAAAADWAgAAdGhlbWUvdGhlbWUvdGhlbWUxLnhtbFBLAQItABQABgAIAAAAIQAN +0ZCftgAAABsBAAAnAAAAAAAAAAAAAAAAAJ0KAAB0aGVtZS90aGVtZS9fcmVscy90aGVtZU1hbmFn +ZXIueG1sLnJlbHNQSwUGAAAAAAUABQBdAQAAmAsAAAAA + +------=_NextPart_01D7437B.526C0BD0 +Content-Location: file:///C:/2AEBA2D4/pmp+enhancement+-+spike,+sail+and+unit+test_files/colorschememapping.xml +Content-Transfer-Encoding: quoted-printable +Content-Type: text/xml + + + +------=_NextPart_01D7437B.526C0BD0 +Content-Location: file:///C:/2AEBA2D4/pmp+enhancement+-+spike,+sail+and+unit+test_files/filelist.xml +Content-Transfer-Encoding: quoted-printable +Content-Type: text/xml; charset="utf-8" + + + + + + + +------=_NextPart_01D7437B.526C0BD0-- diff --git a/vendor/riscv-isa-sim/tests/mseccfg/syscalls.c b/vendor/riscv-isa-sim/tests/mseccfg/syscalls.c new file mode 100644 index 00000000..9b526f29 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/syscalls.c @@ -0,0 +1,485 @@ +// See LICENSE for license details. + +#include +#include +#include +#include +#include +#include +#include "util.h" + +#define SYS_write 64 + +#undef strcmp + +extern volatile uint64_t tohost; +extern volatile uint64_t fromhost; + +static uintptr_t syscall(uintptr_t which, uint64_t arg0, uint64_t arg1, uint64_t arg2) +{ + volatile uint64_t magic_mem[8] __attribute__((aligned(64))); + magic_mem[0] = which; + magic_mem[1] = arg0; + magic_mem[2] = arg1; + magic_mem[3] = arg2; + __sync_synchronize(); + + tohost = (uintptr_t)magic_mem; + while (fromhost == 0) + ; + fromhost = 0; + + __sync_synchronize(); + return magic_mem[0]; +} + +#define NUM_COUNTERS 2 +static uintptr_t counters[NUM_COUNTERS]; +static char* counter_names[NUM_COUNTERS]; + +void setStats(int enable) +{ + int i = 0; +#define READ_CTR(name) do { \ + while (i >= NUM_COUNTERS) ; \ + uintptr_t csr = read_csr(name); \ + if (!enable) { csr -= counters[i]; counter_names[i] = #name; } \ + counters[i++] = csr; \ + } while (0) + + READ_CTR(mcycle); + READ_CTR(minstret); + +#undef READ_CTR +} + +void __attribute__((noreturn)) tohost_exit(uintptr_t code) +{ + tohost = (code << 1) | 1; + while (1); +} + +uintptr_t __attribute__((weak)) handle_trap(uintptr_t cause, uintptr_t epc, uintptr_t regs[32]) +{ + tohost_exit(1337); +} + +void exit(int code) +{ + tohost_exit(code); +} + +void abort() +{ + exit(128 + SIGABRT); +} + +void printstr(const char* s) +{ + syscall(SYS_write, 1, (uintptr_t)s, strlen(s)); +} + +void __attribute__((weak)) thread_entry(int cid, int nc) +{ + // multi-threaded programs override this function. + // for the case of single-threaded programs, only let core 0 proceed. + while (cid != 0); +} + +int __attribute__((weak)) main(int argc, char** argv) +{ + // single-threaded programs override this function. 
+ printstr("Implement main(), foo!\n"); + return -1; +} + +static void init_tls() +{ + register void* thread_pointer asm("tp"); + extern char _tdata_begin, _tdata_end, _tbss_end; + size_t tdata_size = &_tdata_end - &_tdata_begin; + memcpy(thread_pointer, &_tdata_begin, tdata_size); + size_t tbss_size = &_tbss_end - &_tdata_end; + memset(thread_pointer + tdata_size, 0, tbss_size); +} + +void _init(int cid, int nc) +{ + init_tls(); + thread_entry(cid, nc); + + // only single-threaded programs should ever get here. + int ret = main(0, 0); + + char buf[NUM_COUNTERS * 32] __attribute__((aligned(64))); + char* pbuf = buf; + for (int i = 0; i < NUM_COUNTERS; i++) + if (counters[i]) + pbuf += sprintf(pbuf, "%s = %d\n", counter_names[i], counters[i]); + if (pbuf != buf) + printstr(buf); + + exit(ret); +} + +#undef putchar +int putchar(int ch) +{ + static __thread char buf[64] __attribute__((aligned(64), section(".tls_start"))); +#if DEBUG_FIRST_PUTCHAR + static __thread int buflen = -1; + + if (buflen == -1) { + for (int i=0; i<16; i++) { + buf[i] = ((uint64_t)buf >> (4 * (15-i))) & 0xF; + if (buf[i] < 10) buf[i] += '0'; + else buf[i] += 'A' - 10; + } + + buf[16] = '-'; + buf[17] = ' '; + buflen = 18; + } +#else + static __thread int buflen = 0; +#endif + + buf[buflen++] = ch; + + if (ch == '\n' || buflen == sizeof(buf)) + { + syscall(SYS_write, 1, (uintptr_t)buf, buflen); + buflen = 0; + } + + return 0; +} + +void printhex(uint64_t x) +{ + char str[17]; + int i; + for (i = 0; i < 16; i++) + { + str[15-i] = (x & 0xF) + ((x & 0xF) < 10 ? '0' : 'a'-10); + x >>= 4; + } + str[16] = 0; + + printstr(str); +} + +static inline void printnum(void (*putch)(int, void**), void **putdat, + unsigned long long num, unsigned base, int width, int padc) +{ + unsigned digs[sizeof(num)*CHAR_BIT]; + int pos = 0; + + while (1) + { + digs[pos++] = num % base; + if (num < base) + break; + num /= base; + } + + while (width-- > pos) + putch(padc, putdat); + + while (pos-- > 0) + putch(digs[pos] + (digs[pos] >= 10 ? 
'a' - 10 : '0'), putdat); +} + +static unsigned long long getuint(va_list *ap, int lflag) +{ + if (lflag >= 2) + return va_arg(*ap, unsigned long long); + else if (lflag) + return va_arg(*ap, unsigned long); + else + return va_arg(*ap, unsigned int); +} + +static long long getint(va_list *ap, int lflag) +{ + if (lflag >= 2) + return va_arg(*ap, long long); + else if (lflag) + return va_arg(*ap, long); + else + return va_arg(*ap, int); +} + +static void vprintfmt(void (*putch)(int, void**), void **putdat, const char *fmt, va_list ap) +{ + register const char* p; + const char* last_fmt; + register int ch, err; + unsigned long long num; + int base, lflag, width, precision, altflag; + char padc; + + while (1) { + while ((ch = *(unsigned char *) fmt) != '%') { + if (ch == '\0') + return; + fmt++; + putch(ch, putdat); + } + fmt++; + + // Process a %-escape sequence + last_fmt = fmt; + padc = ' '; + width = -1; + precision = -1; + lflag = 0; + altflag = 0; + reswitch: + switch (ch = *(unsigned char *) fmt++) { + + // flag to pad on the right + case '-': + padc = '-'; + goto reswitch; + + // flag to pad with 0's instead of spaces + case '0': + padc = '0'; + goto reswitch; + + // width field + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + for (precision = 0; ; ++fmt) { + precision = precision * 10 + ch - '0'; + ch = *fmt; + if (ch < '0' || ch > '9') + break; + } + goto process_precision; + + case '*': + precision = va_arg(ap, int); + goto process_precision; + + case '.': + if (width < 0) + width = 0; + goto reswitch; + + case '#': + altflag = 1; + goto reswitch; + + process_precision: + if (width < 0) + width = precision, precision = -1; + goto reswitch; + + // long flag (doubled for long long) + case 'l': + lflag++; + goto reswitch; + + // character + case 'c': + putch(va_arg(ap, int), putdat); + break; + + // string + case 's': + if ((p = va_arg(ap, char *)) == NULL) + p = "(null)"; + if (width > 0 && padc != '-') + for (width -= strnlen(p, precision); width > 0; width--) + putch(padc, putdat); + for (; (ch = *p) != '\0' && (precision < 0 || --precision >= 0); width--) { + putch(ch, putdat); + p++; + } + for (; width > 0; width--) + putch(' ', putdat); + break; + + // (signed) decimal + case 'd': + num = getint(&ap, lflag); + if ((long long) num < 0) { + putch('-', putdat); + num = -(long long) num; + } + base = 10; + goto signed_number; + + // unsigned decimal + case 'u': + base = 10; + goto unsigned_number; + + // (unsigned) octal + case 'o': + // should do something with padding so it's always 3 octits + base = 8; + goto unsigned_number; + + // pointer + case 'p': + static_assert(sizeof(long) == sizeof(void*)); + lflag = 1; + putch('0', putdat); + putch('x', putdat); + /* fall through to 'x' */ + + // (unsigned) hexadecimal + case 'x': + base = 16; + unsigned_number: + num = getuint(&ap, lflag); + signed_number: + printnum(putch, putdat, num, base, width, padc); + break; + + // escaped '%' character + case '%': + putch(ch, putdat); + break; + + // unrecognized escape sequence - just print it literally + default: + putch('%', putdat); + fmt = last_fmt; + break; + } + } +} + +int printf(const char* fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + + vprintfmt((void*)putchar, 0, fmt, ap); + + va_end(ap); + return 0; // incorrect return value, but who cares, anyway? +} + +int sprintf(char* str, const char* fmt, ...) 
+{ + va_list ap; + char* str0 = str; + va_start(ap, fmt); + + void sprintf_putch(int ch, void** data) + { + char** pstr = (char**)data; + **pstr = ch; + (*pstr)++; + } + + vprintfmt(sprintf_putch, (void**)&str, fmt, ap); + *str = 0; + + va_end(ap); + return str - str0; +} + +void* memcpy(void* dest, const void* src, size_t len) +{ + if ((((uintptr_t)dest | (uintptr_t)src | len) & (sizeof(uintptr_t)-1)) == 0) { + const uintptr_t* s = src; + uintptr_t *d = dest; + while (d < (uintptr_t*)(dest + len)) + *d++ = *s++; + } else { + const char* s = src; + char *d = dest; + while (d < (char*)(dest + len)) + *d++ = *s++; + } + return dest; +} + +void* memset(void* dest, int byte, size_t len) +{ + if ((((uintptr_t)dest | len) & (sizeof(uintptr_t)-1)) == 0) { + uintptr_t word = byte & 0xFF; + word |= word << 8; + word |= word << 16; + word |= word << 16 << 16; + + uintptr_t *d = dest; + while (d < (uintptr_t*)(dest + len)) + *d++ = word; + } else { + char *d = dest; + while (d < (char*)(dest + len)) + *d++ = byte; + } + return dest; +} + +size_t strlen(const char *s) +{ + const char *p = s; + while (*p) + p++; + return p - s; +} + +size_t strnlen(const char *s, size_t n) +{ + const char *p = s; + while (n-- && *p) + p++; + return p - s; +} + +int strcmp(const char* s1, const char* s2) +{ + unsigned char c1, c2; + + do { + c1 = *s1++; + c2 = *s2++; + } while (c1 != 0 && c1 == c2); + + return c1 - c2; +} + +char* strcpy(char* dest, const char* src) +{ + char* d = dest; + while ((*d++ = *src++)) + ; + return dest; +} + +long atol(const char* str) +{ + long res = 0; + int sign = 0; + + while (*str == ' ') + str++; + + if (*str == '-' || *str == '+') { + sign = *str == '-'; + str++; + } + + while (*str) { + res *= 10; + res += *str++ - '0'; + } + + return sign ? -res : res; +} diff --git a/vendor/riscv-isa-sim/tests/mseccfg/util.h b/vendor/riscv-isa-sim/tests/mseccfg/util.h new file mode 100644 index 00000000..081cfd63 --- /dev/null +++ b/vendor/riscv-isa-sim/tests/mseccfg/util.h @@ -0,0 +1,90 @@ +// See LICENSE for license details. 
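+//
+// Shared bare-metal helpers for the mseccfg tests: result verification
+// (verify/verifyDouble), a sense-reversing barrier for multi-core runs, a
+// simple LFSR, instruction length detection, and a cycle/instret stats macro.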
+
+#ifndef __UTIL_H
+#define __UTIL_H
+
+extern void setStats(int enable);
+
+#include <stdint.h>
+
+#define static_assert(cond) switch(0) { case 0: case !!(long)(cond): ; }
+
+static int verify(int n, const volatile int* test, const int* verify)
+{
+  int i;
+  // Unrolled for faster verification
+  for (i = 0; i < n/2*2; i+=2)
+  {
+    int t0 = test[i], t1 = test[i+1];
+    int v0 = verify[i], v1 = verify[i+1];
+    if (t0 != v0) return i+1;
+    if (t1 != v1) return i+2;
+  }
+  if (n % 2 != 0 && test[n-1] != verify[n-1])
+    return n;
+  return 0;
+}
+
+static int verifyDouble(int n, const volatile double* test, const double* verify)
+{
+  int i;
+  // Unrolled for faster verification
+  for (i = 0; i < n/2*2; i+=2)
+  {
+    double t0 = test[i], t1 = test[i+1];
+    double v0 = verify[i], v1 = verify[i+1];
+    int eq1 = t0 == v0, eq2 = t1 == v1;
+    if (!(eq1 & eq2)) return i+1+eq1;
+  }
+  if (n % 2 != 0 && test[n-1] != verify[n-1])
+    return n;
+  return 0;
+}
+
+static void __attribute__((noinline)) barrier(int ncores)
+{
+  static volatile int sense;
+  static volatile int count;
+  static __thread int threadsense;
+
+  __sync_synchronize();
+
+  threadsense = !threadsense;
+  if (__sync_fetch_and_add(&count, 1) == ncores-1)
+  {
+    count = 0;
+    sense = threadsense;
+  }
+  else while(sense != threadsense)
+    ;
+
+  __sync_synchronize();
+}
+
+static uint64_t lfsr(uint64_t x)
+{
+  uint64_t bit = (x ^ (x >> 1)) & 1;
+  return (x >> 1) | (bit << 62);
+}
+
+static uintptr_t insn_len(uintptr_t pc)
+{
+  return (*(unsigned short*)pc & 3) ? 4 : 2;
+}
+
+#ifdef __riscv
+#include "encoding.h"
+#endif
+
+#define stringify_1(s) #s
+#define stringify(s) stringify_1(s)
+#define stats(code, iter) do { \
+    unsigned long _c = -read_csr(mcycle), _i = -read_csr(minstret); \
+    code; \
+    _c += read_csr(mcycle), _i += read_csr(minstret); \
+    if (cid == 0) \
+      printf("\n%s: %ld cycles, %ld.%ld cycles/iter, %ld.%ld CPI\n", \
+             stringify(code), _c, _c/iter, 10*_c/iter%10, _c/_i, 10*_c/_i%10); \
+  } while(0)
+
+#endif //__UTIL_H
diff --git a/vendor/riscv-isa-sim/tests/testlib.py b/vendor/riscv-isa-sim/tests/testlib.py
new file mode 100644
index 00000000..d5e8d795
--- /dev/null
+++ b/vendor/riscv-isa-sim/tests/testlib.py
@@ -0,0 +1,116 @@
+import os.path
+import pexpect
+import subprocess
+import tempfile
+import testlib
+import unittest
+
+# Note that gdb comes with its own testsuite. I was unable to figure out how to
+# run that testsuite against the spike simulator.
+
+def find_file(path):
+    for directory in (os.getcwd(), os.path.dirname(testlib.__file__)):
+        fullpath = os.path.join(directory, path)
+        if os.path.exists(fullpath):
+            return fullpath
+    return None
+
+def compile(*args):
+    """Compile a single .c file into a binary."""
+    dst = os.path.splitext(args[0])[0]
+    cc = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gcc")
+    cmd = [cc, "-g", "-O", "-o", dst]
+    for arg in args:
+        found = find_file(arg)
+        if found:
+            cmd.append(found)
+        else:
+            cmd.append(arg)
+    cmd = " ".join(cmd)
+    result = os.system(cmd)
+    assert result == 0, "%r failed" % cmd
+    return dst
+
+def unused_port():
+    # http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python/2838309#2838309
+    import socket
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.bind(("",0))
+    port = s.getsockname()[1]
+    s.close()
+    return port
+
+class Spike(object):
+    def __init__(self, binary, halted=False, with_gdb=True, timeout=None):
+        """Launch spike. 
Return tuple of its process and the port it's running on.""" + cmd = [] + if timeout: + cmd += ["timeout", str(timeout)] + + cmd += [find_file("spike")] + if halted: + cmd.append('-H') + if with_gdb: + self.port = unused_port() + cmd += ['--gdb-port', str(self.port)] + cmd.append('pk') + if binary: + cmd.append(binary) + logfile = open("spike.log", "w") + self.process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=logfile, + stderr=logfile) + + def __del__(self): + try: + self.process.kill() + self.process.wait() + except OSError: + pass + + def wait(self, *args, **kwargs): + return self.process.wait(*args, **kwargs) + +class Gdb(object): + def __init__(self): + path = os.path.expandvars("$RISCV/bin/riscv64-unknown-elf-gdb") + self.child = pexpect.spawn(path) + self.child.logfile = file("gdb.log", "w") + self.wait() + self.command("set width 0") + self.command("set height 0") + # Force consistency. + self.command("set print entry-values no") + + def wait(self): + """Wait for prompt.""" + self.child.expect("\(gdb\)") + + def command(self, command, timeout=-1): + self.child.sendline(command) + self.child.expect("\n", timeout=timeout) + self.child.expect("\(gdb\)", timeout=timeout) + return self.child.before.strip() + + def c(self, wait=True): + if wait: + return self.command("c") + else: + self.child.sendline("c") + self.child.expect("Continuing") + + def interrupt(self): + self.child.send("\003"); + self.child.expect("\(gdb\)") + + def x(self, address, size='w'): + output = self.command("x/%s %s" % (size, address)) + value = int(output.split(':')[1].strip(), 0) + return value + + def p(self, obj): + output = self.command("p %s" % obj) + value = int(output.split('=')[-1].strip()) + return value + + def stepi(self): + return self.command("stepi") diff --git a/vendor/riscv_isa_sim.lock.hjson b/vendor/riscv_isa_sim.lock.hjson index f71b9c61..3e076f28 100644 --- a/vendor/riscv_isa_sim.lock.hjson +++ b/vendor/riscv_isa_sim.lock.hjson @@ -8,7 +8,7 @@ { upstream: { - url: https://github.com/joxie/riscv-isa-sim - rev: c2186bf1731b2a123ccc785ce9585861d370886f + url: https://github.com/Saad525/riscv-isa-sim + rev: 9d44dcb2818c98412f9a264076d568dd5566d7f1 } } diff --git a/vendor/riscv_isa_sim.vendor.hjson b/vendor/riscv_isa_sim.vendor.hjson index 3310cfcd..381a1f24 100644 --- a/vendor/riscv_isa_sim.vendor.hjson +++ b/vendor/riscv_isa_sim.vendor.hjson @@ -2,11 +2,11 @@ // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 { - name: "epmp-tests", - target_dir: "epmp-tests", + name: "riscv-isa-sim", + target_dir: "riscv-isa-sim", upstream: { - url: "https://github.com/joxie/riscv-isa-sim", + url: "https://github.com/Saad525/riscv-isa-sim", rev: "master", }, -} +} \ No newline at end of file